Rusty Russell0a8a69d2007-10-22 11:03:40 +10001/* Virtio ring implementation.
2 *
3 * Copyright 2007 Rusty Russell IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19#include <linux/virtio.h>
20#include <linux/virtio_ring.h>
Rusty Russelle34f8722008-07-25 12:06:13 -050021#include <linux/virtio_config.h>
Rusty Russell0a8a69d2007-10-22 11:03:40 +100022#include <linux/device.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090023#include <linux/slab.h>
Paul Gortmakerb5a2c4f2011-07-03 16:20:30 -040024#include <linux/module.h>
Rusty Russelle93300b2012-01-12 15:44:43 +103025#include <linux/hrtimer.h>
Andy Lutomirski780bc792016-02-02 21:46:36 -080026#include <linux/dma-mapping.h>
Andy Lutomirski78fe3982016-02-02 21:46:40 -080027#include <xen/xen.h>
Rusty Russell0a8a69d2007-10-22 11:03:40 +100028
29#ifdef DEBUG
30/* For development, we want to crash whenever the ring is screwed. */
Rusty Russell9499f5e2009-06-12 22:16:35 -060031#define BAD_RING(_vq, fmt, args...) \
32 do { \
33 dev_err(&(_vq)->vq.vdev->dev, \
34 "%s:"fmt, (_vq)->vq.name, ##args); \
35 BUG(); \
36 } while (0)
Rusty Russellc5f841f2009-03-30 21:55:22 -060037/* Caller is supposed to guarantee no reentry. */
38#define START_USE(_vq) \
39 do { \
40 if ((_vq)->in_use) \
Rusty Russell9499f5e2009-06-12 22:16:35 -060041 panic("%s:in_use = %i\n", \
42 (_vq)->vq.name, (_vq)->in_use); \
Rusty Russellc5f841f2009-03-30 21:55:22 -060043 (_vq)->in_use = __LINE__; \
Rusty Russell9499f5e2009-06-12 22:16:35 -060044 } while (0)
Roel Kluin3a35ce72009-01-22 16:42:57 +010045#define END_USE(_vq) \
Rusty Russell97a545a2010-02-24 14:22:22 -060046 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
Tiwei Bie4d6a1052018-11-21 18:03:22 +080047#define LAST_ADD_TIME_UPDATE(_vq) \
48 do { \
49 ktime_t now = ktime_get(); \
50 \
 51		/* No kick or get, with 0.1 second between?  Warn. */ \
52 if ((_vq)->last_add_time_valid) \
53 WARN_ON(ktime_to_ms(ktime_sub(now, \
54 (_vq)->last_add_time)) > 100); \
55 (_vq)->last_add_time = now; \
56 (_vq)->last_add_time_valid = true; \
57 } while (0)
58#define LAST_ADD_TIME_CHECK(_vq) \
59 do { \
60 if ((_vq)->last_add_time_valid) { \
61 WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
62 (_vq)->last_add_time)) > 100); \
63 } \
64 } while (0)
65#define LAST_ADD_TIME_INVALID(_vq) \
66 ((_vq)->last_add_time_valid = false)
Rusty Russell0a8a69d2007-10-22 11:03:40 +100067#else
Rusty Russell9499f5e2009-06-12 22:16:35 -060068#define BAD_RING(_vq, fmt, args...) \
69 do { \
70 dev_err(&_vq->vq.vdev->dev, \
71 "%s:"fmt, (_vq)->vq.name, ##args); \
72 (_vq)->broken = true; \
73 } while (0)
Rusty Russell0a8a69d2007-10-22 11:03:40 +100074#define START_USE(vq)
75#define END_USE(vq)
Tiwei Bie4d6a1052018-11-21 18:03:22 +080076#define LAST_ADD_TIME_UPDATE(vq)
77#define LAST_ADD_TIME_CHECK(vq)
78#define LAST_ADD_TIME_INVALID(vq)
Rusty Russell0a8a69d2007-10-22 11:03:40 +100079#endif
80
Tiwei Biecbeedb72018-11-21 18:03:24 +080081struct vring_desc_state_split {
Andy Lutomirski780bc792016-02-02 21:46:36 -080082 void *data; /* Data for callback. */
83 struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
84};
85
Tiwei Bie1ce9e602018-11-21 18:03:27 +080086struct vring_desc_state_packed {
87 void *data; /* Data for callback. */
88 struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
89 u16 num; /* Descriptor list length. */
90 u16 next; /* The next desc state in a list. */
91 u16 last; /* The last desc state in a list. */
92};
93
94struct vring_desc_extra_packed {
95 dma_addr_t addr; /* Buffer DMA addr. */
96 u32 len; /* Buffer length. */
97 u16 flags; /* Descriptor flags. */
98};
99
Michael S. Tsirkin43b4f722015-01-15 13:33:31 +0200100struct vring_virtqueue {
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000101 struct virtqueue vq;
102
Tiwei Bie1ce9e602018-11-21 18:03:27 +0800103 /* Is this a packed ring? */
104 bool packed_ring;
105
Tiwei Biefb3fba62018-11-21 18:03:26 +0800106 /* Is DMA API used? */
107 bool use_dma_api;
108
Rusty Russell7b21e342012-01-12 15:44:42 +1030109 /* Can we use weak barriers? */
110 bool weak_barriers;
111
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000112 /* Other side has made a mess, don't try any more. */
113 bool broken;
114
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +0100115 /* Host supports indirect buffers */
116 bool indirect;
117
Michael S. Tsirkina5c262c2011-05-20 02:10:44 +0300118 /* Host publishes avail event idx */
119 bool event;
120
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000121 /* Head of free buffer list. */
122 unsigned int free_head;
123 /* Number we've added since last sync. */
124 unsigned int num_added;
125
126 /* Last used index we've seen. */
Anthony Liguori1bc49532007-11-07 15:49:24 -0600127 u16 last_used_idx;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000128
Tiwei Bie1ce9e602018-11-21 18:03:27 +0800129 union {
130 /* Available for split ring */
131 struct {
132 /* Actual memory layout for this queue. */
133 struct vring vring;
Venkatesh Srinivasf277ec42015-11-10 16:21:07 -0800134
Tiwei Bie1ce9e602018-11-21 18:03:27 +0800135 /* Last written value to avail->flags */
136 u16 avail_flags_shadow;
Tiwei Biee593bf92018-11-21 18:03:21 +0800137
Tiwei Bie1ce9e602018-11-21 18:03:27 +0800138 /*
139 * Last written value to avail->idx in
140 * guest byte order.
141 */
142 u16 avail_idx_shadow;
Tiwei Biecbeedb72018-11-21 18:03:24 +0800143
Tiwei Bie1ce9e602018-11-21 18:03:27 +0800144 /* Per-descriptor state. */
145 struct vring_desc_state_split *desc_state;
Tiwei Bied79dca72018-11-21 18:03:25 +0800146
Tiwei Bie1ce9e602018-11-21 18:03:27 +0800147 /* DMA address and size information */
148 dma_addr_t queue_dma_addr;
149 size_t queue_size_in_bytes;
150 } split;
151
152 /* Available for packed ring */
153 struct {
154 /* Actual memory layout for this queue. */
155 struct vring_packed vring;
156
157 /* Driver ring wrap counter. */
158 bool avail_wrap_counter;
159
160 /* Device ring wrap counter. */
161 bool used_wrap_counter;
162
163 /* Avail used flags. */
164 u16 avail_used_flags;
165
166 /* Index of the next avail descriptor. */
167 u16 next_avail_idx;
168
169 /*
170 * Last written value to driver->flags in
171 * guest byte order.
172 */
173 u16 event_flags_shadow;
174
175 /* Per-descriptor state. */
176 struct vring_desc_state_packed *desc_state;
177 struct vring_desc_extra_packed *desc_extra;
178
179 /* DMA address and size information */
180 dma_addr_t ring_dma_addr;
181 dma_addr_t driver_event_dma_addr;
182 dma_addr_t device_event_dma_addr;
183 size_t ring_size_in_bytes;
184 size_t event_size_in_bytes;
185 } packed;
186 };
Venkatesh Srinivasf277ec42015-11-10 16:21:07 -0800187
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000188 /* How to notify other side. FIXME: commonalize hcalls! */
Heinz Graalfs46f9c2b2013-10-29 09:38:50 +1030189 bool (*notify)(struct virtqueue *vq);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000190
Andy Lutomirski2a2d1382016-02-02 21:46:37 -0800191 /* DMA, allocation, and size information */
192 bool we_own_ring;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -0800193
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000194#ifdef DEBUG
195 /* They're supposed to lock for us. */
196 unsigned int in_use;
Rusty Russelle93300b2012-01-12 15:44:43 +1030197
198 /* Figure out if their kicks are too delayed. */
199 bool last_add_time_valid;
200 ktime_t last_add_time;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000201#endif
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000202};
203
Tiwei Biee6f633e2018-11-21 18:03:20 +0800204
205/*
206 * Helpers.
207 */
208
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000209#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
210
Tiwei Bie2f18c2d2018-11-21 18:03:23 +0800211static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
212 unsigned int total_sg)
213{
214 struct vring_virtqueue *vq = to_vvq(_vq);
215
216 /*
217 * If the host supports indirect descriptor tables, and we have multiple
218 * buffers, then go indirect. FIXME: tune this threshold
219 */
220 return (vq->indirect && total_sg > 1 && vq->vq.num_free);
221}
222
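/*
 * Illustrative sketch, not part of the driver: a stand-alone restatement
 * of the rule above, with made-up parameter names.  Under the current
 * "total_sg > 1" threshold, a single-element request always uses direct
 * descriptors, while any multi-element request is packed into one
 * indirect table and so consumes a single ring slot, provided at least
 * one slot is still free.
 */
static inline bool example_would_use_indirect(bool host_has_indirect,
					      unsigned int total_sg,
					      unsigned int num_free)
{
	return host_has_indirect && total_sg > 1 && num_free > 0;
}
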
Andy Lutomirskid26c96c2016-02-02 21:46:35 -0800223/*
Michael S. Tsirkin1a937692016-04-18 12:58:14 +0300224 * Modern virtio devices have feature bits to specify whether they need a
225 * quirk and bypass the IOMMU. If not there, just use the DMA API.
226 *
227 * If there, the interaction between virtio and DMA API is messy.
Andy Lutomirskid26c96c2016-02-02 21:46:35 -0800228 *
229 * On most systems with virtio, physical addresses match bus addresses,
230 * and it doesn't particularly matter whether we use the DMA API.
231 *
232 * On some systems, including Xen and any system with a physical device
233 * that speaks virtio behind a physical IOMMU, we must use the DMA API
234 * for virtio DMA to work at all.
235 *
236 * On other systems, including SPARC and PPC64, virtio-pci devices are
237 * enumerated as though they are behind an IOMMU, but the virtio host
238 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
239 * there or somehow map everything as the identity.
240 *
241 * For the time being, we preserve historic behavior and bypass the DMA
242 * API.
Michael S. Tsirkin1a937692016-04-18 12:58:14 +0300243 *
244 * TODO: install a per-device DMA ops structure that does the right thing
245 * taking into account all the above quirks, and use the DMA API
246 * unconditionally on data path.
Andy Lutomirskid26c96c2016-02-02 21:46:35 -0800247 */
248
249static bool vring_use_dma_api(struct virtio_device *vdev)
250{
Michael S. Tsirkin1a937692016-04-18 12:58:14 +0300251 if (!virtio_has_iommu_quirk(vdev))
252 return true;
253
254 /* Otherwise, we are left to guess. */
Andy Lutomirski78fe3982016-02-02 21:46:40 -0800255 /*
 256	 * In theory, it's possible to have a buggy QEMU-supplied
257 * emulated Q35 IOMMU and Xen enabled at the same time. On
258 * such a configuration, virtio has never worked and will
259 * not work without an even larger kludge. Instead, enable
260 * the DMA API if we're a Xen guest, which at least allows
261 * all of the sensible Xen configurations to work correctly.
262 */
263 if (xen_domain())
264 return true;
265
Andy Lutomirskid26c96c2016-02-02 21:46:35 -0800266 return false;
267}
268
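/*
 * Illustrative sketch of the policy above (helper and parameter names
 * are made up, not part of the driver): use the DMA API whenever the
 * device does not carry the legacy "bypass the IOMMU" quirk, or when
 * running as a Xen guest; otherwise fall back to raw physical addresses
 * for the time being.
 */
static inline bool example_wants_dma_api(bool has_iommu_quirk, bool is_xen_guest)
{
	if (!has_iommu_quirk)
		return true;

	return is_xen_guest;
}
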
Tiwei Bied79dca72018-11-21 18:03:25 +0800269static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
270 dma_addr_t *dma_handle, gfp_t flag)
271{
272 if (vring_use_dma_api(vdev)) {
273 return dma_alloc_coherent(vdev->dev.parent, size,
274 dma_handle, flag);
275 } else {
276 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
277
278 if (queue) {
279 phys_addr_t phys_addr = virt_to_phys(queue);
280 *dma_handle = (dma_addr_t)phys_addr;
281
282 /*
 283			 * Sanity check: make sure we didn't truncate
284 * the address. The only arches I can find that
285 * have 64-bit phys_addr_t but 32-bit dma_addr_t
286 * are certain non-highmem MIPS and x86
287 * configurations, but these configurations
288 * should never allocate physical pages above 32
289 * bits, so this is fine. Just in case, throw a
290 * warning and abort if we end up with an
291 * unrepresentable address.
292 */
293 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
294 free_pages_exact(queue, PAGE_ALIGN(size));
295 return NULL;
296 }
297 }
298 return queue;
299 }
300}
301
302static void vring_free_queue(struct virtio_device *vdev, size_t size,
303 void *queue, dma_addr_t dma_handle)
304{
305 if (vring_use_dma_api(vdev))
306 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
307 else
308 free_pages_exact(queue, PAGE_ALIGN(size));
309}
310
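/*
 * Illustrative usage sketch (function name is made up): whichever branch
 * backed the allocation, it must be released with vring_free_queue()
 * using the same virtio_device, the same size and the dma_addr_t that
 * vring_alloc_queue() returned, as the queue creation code below does on
 * its error path.
 */
static void example_alloc_then_free(struct virtio_device *vdev, size_t size)
{
	dma_addr_t dma_addr;
	void *queue = vring_alloc_queue(vdev, size, &dma_addr,
					GFP_KERNEL | __GFP_ZERO);

	if (queue)
		vring_free_queue(vdev, size, queue, dma_addr);
}
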
Andy Lutomirski780bc792016-02-02 21:46:36 -0800311/*
312 * The DMA ops on various arches are rather gnarly right now, and
313 * making all of the arch DMA ops work on the vring device itself
314 * is a mess. For now, we use the parent device for DMA ops.
315 */
Michael S. Tsirkin75bfa812016-10-31 00:38:21 +0200316static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
Andy Lutomirski780bc792016-02-02 21:46:36 -0800317{
318 return vq->vq.vdev->dev.parent;
319}
320
321/* Map one sg entry. */
322static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
323 struct scatterlist *sg,
324 enum dma_data_direction direction)
325{
Tiwei Biefb3fba62018-11-21 18:03:26 +0800326 if (!vq->use_dma_api)
Andy Lutomirski780bc792016-02-02 21:46:36 -0800327 return (dma_addr_t)sg_phys(sg);
328
329 /*
330 * We can't use dma_map_sg, because we don't use scatterlists in
331 * the way it expects (we don't guarantee that the scatterlist
332 * will exist for the lifetime of the mapping).
333 */
334 return dma_map_page(vring_dma_dev(vq),
335 sg_page(sg), sg->offset, sg->length,
336 direction);
337}
338
339static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
340 void *cpu_addr, size_t size,
341 enum dma_data_direction direction)
342{
Tiwei Biefb3fba62018-11-21 18:03:26 +0800343 if (!vq->use_dma_api)
Andy Lutomirski780bc792016-02-02 21:46:36 -0800344 return (dma_addr_t)virt_to_phys(cpu_addr);
345
346 return dma_map_single(vring_dma_dev(vq),
347 cpu_addr, size, direction);
348}
349
Tiwei Biee6f633e2018-11-21 18:03:20 +0800350static int vring_mapping_error(const struct vring_virtqueue *vq,
351 dma_addr_t addr)
352{
Tiwei Biefb3fba62018-11-21 18:03:26 +0800353 if (!vq->use_dma_api)
Tiwei Biee6f633e2018-11-21 18:03:20 +0800354 return 0;
355
356 return dma_mapping_error(vring_dma_dev(vq), addr);
357}
358
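/*
 * Illustrative sketch of the calling pattern used by the add paths below
 * (function name is made up): map an element, then validate the returned
 * cookie before writing it into a descriptor.  On rings that bypass the
 * DMA API the check is a no-op, since the "mapping" is just the physical
 * address.
 */
static int example_map_checked(const struct vring_virtqueue *vq,
			       struct scatterlist *sg, dma_addr_t *addr)
{
	*addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);

	/* Caller is expected to unwind earlier mappings on failure. */
	return vring_mapping_error(vq, *addr) ? -ENOMEM : 0;
}
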
359
360/*
361 * Split ring specific functions - *_split().
362 */
363
Tiwei Bie138fd252018-11-21 18:03:19 +0800364static void vring_unmap_one_split(const struct vring_virtqueue *vq,
365 struct vring_desc *desc)
Andy Lutomirski780bc792016-02-02 21:46:36 -0800366{
367 u16 flags;
368
Tiwei Biefb3fba62018-11-21 18:03:26 +0800369 if (!vq->use_dma_api)
Andy Lutomirski780bc792016-02-02 21:46:36 -0800370 return;
371
372 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
373
374 if (flags & VRING_DESC_F_INDIRECT) {
375 dma_unmap_single(vring_dma_dev(vq),
376 virtio64_to_cpu(vq->vq.vdev, desc->addr),
377 virtio32_to_cpu(vq->vq.vdev, desc->len),
378 (flags & VRING_DESC_F_WRITE) ?
379 DMA_FROM_DEVICE : DMA_TO_DEVICE);
380 } else {
381 dma_unmap_page(vring_dma_dev(vq),
382 virtio64_to_cpu(vq->vq.vdev, desc->addr),
383 virtio32_to_cpu(vq->vq.vdev, desc->len),
384 (flags & VRING_DESC_F_WRITE) ?
385 DMA_FROM_DEVICE : DMA_TO_DEVICE);
386 }
387}
388
Tiwei Bie138fd252018-11-21 18:03:19 +0800389static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
390 unsigned int total_sg,
391 gfp_t gfp)
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +0100392{
393 struct vring_desc *desc;
Rusty Russellb25bd252014-09-11 10:17:38 +0930394 unsigned int i;
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +0100395
Will Deaconb92b1b82012-10-19 14:03:33 +0100396 /*
397 * We require lowmem mappings for the descriptors because
398 * otherwise virt_to_phys will give us bogus addresses in the
399 * virtqueue.
400 */
Michal Hocko82107532015-12-01 15:32:49 +0100401 gfp &= ~__GFP_HIGHMEM;
Will Deaconb92b1b82012-10-19 14:03:33 +0100402
Kees Cook6da2ec52018-06-12 13:55:00 -0700403 desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +0100404 if (!desc)
Rusty Russellb25bd252014-09-11 10:17:38 +0930405 return NULL;
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +0100406
Rusty Russellb25bd252014-09-11 10:17:38 +0930407 for (i = 0; i < total_sg; i++)
Michael S. Tsirkin00e6f3d2014-10-22 15:42:09 +0300408 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
Rusty Russellb25bd252014-09-11 10:17:38 +0930409 return desc;
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +0100410}
411
Tiwei Bie138fd252018-11-21 18:03:19 +0800412static inline int virtqueue_add_split(struct virtqueue *_vq,
413 struct scatterlist *sgs[],
414 unsigned int total_sg,
415 unsigned int out_sgs,
416 unsigned int in_sgs,
417 void *data,
418 void *ctx,
419 gfp_t gfp)
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000420{
421 struct vring_virtqueue *vq = to_vvq(_vq);
Rusty Russell13816c72013-03-20 15:37:09 +1030422 struct scatterlist *sg;
Rusty Russellb25bd252014-09-11 10:17:38 +0930423 struct vring_desc *desc;
Andy Lutomirski780bc792016-02-02 21:46:36 -0800424 unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
Michael S. Tsirkin1fe9b6f2010-07-26 16:55:30 +0930425 int head;
Rusty Russellb25bd252014-09-11 10:17:38 +0930426 bool indirect;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000427
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +0100428 START_USE(vq);
429
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000430 BUG_ON(data == NULL);
Michael S. Tsirkin5a08b042017-02-07 06:15:13 +0200431 BUG_ON(ctx && vq->indirect);
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +0100432
Rusty Russell70670442014-03-13 11:23:40 +1030433 if (unlikely(vq->broken)) {
434 END_USE(vq);
435 return -EIO;
436 }
437
Tiwei Bie4d6a1052018-11-21 18:03:22 +0800438 LAST_ADD_TIME_UPDATE(vq);
Rusty Russelle93300b2012-01-12 15:44:43 +1030439
Rusty Russell13816c72013-03-20 15:37:09 +1030440 BUG_ON(total_sg == 0);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000441
Rusty Russellb25bd252014-09-11 10:17:38 +0930442 head = vq->free_head;
443
Tiwei Bie2f18c2d2018-11-21 18:03:23 +0800444 if (virtqueue_use_indirect(_vq, total_sg))
Tiwei Bie138fd252018-11-21 18:03:19 +0800445 desc = alloc_indirect_split(_vq, total_sg, gfp);
Richard W.M. Jones44ed8082017-08-10 17:56:51 +0100446 else {
Rusty Russellb25bd252014-09-11 10:17:38 +0930447 desc = NULL;
Tiwei Biee593bf92018-11-21 18:03:21 +0800448 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
Richard W.M. Jones44ed8082017-08-10 17:56:51 +0100449 }
Rusty Russellb25bd252014-09-11 10:17:38 +0930450
451 if (desc) {
452 /* Use a single buffer which doesn't continue */
Andy Lutomirski780bc792016-02-02 21:46:36 -0800453 indirect = true;
Rusty Russellb25bd252014-09-11 10:17:38 +0930454 /* Set up rest to use this indirect table. */
455 i = 0;
456 descs_used = 1;
Rusty Russellb25bd252014-09-11 10:17:38 +0930457 } else {
Andy Lutomirski780bc792016-02-02 21:46:36 -0800458 indirect = false;
Tiwei Biee593bf92018-11-21 18:03:21 +0800459 desc = vq->split.vring.desc;
Rusty Russellb25bd252014-09-11 10:17:38 +0930460 i = head;
461 descs_used = total_sg;
Rusty Russellb25bd252014-09-11 10:17:38 +0930462 }
463
464 if (vq->vq.num_free < descs_used) {
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000465 pr_debug("Can't add buf len %i - avail = %i\n",
Rusty Russellb25bd252014-09-11 10:17:38 +0930466 descs_used, vq->vq.num_free);
Rusty Russell44653ea2008-07-25 12:06:04 -0500467 /* FIXME: for historical reasons, we force a notify here if
468 * there are outgoing parts to the buffer. Presumably the
469 * host should service the ring ASAP. */
Rusty Russell13816c72013-03-20 15:37:09 +1030470 if (out_sgs)
Rusty Russell44653ea2008-07-25 12:06:04 -0500471 vq->notify(&vq->vq);
Wei Yongjun58625ed2016-08-02 14:16:31 +0000472 if (indirect)
473 kfree(desc);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000474 END_USE(vq);
475 return -ENOSPC;
476 }
477
Rusty Russell13816c72013-03-20 15:37:09 +1030478 for (n = 0; n < out_sgs; n++) {
Rusty Russelleeebf9b2014-09-11 10:17:37 +0930479 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
Andy Lutomirski780bc792016-02-02 21:46:36 -0800480 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
481 if (vring_mapping_error(vq, addr))
482 goto unmap_release;
483
Michael S. Tsirkin00e6f3d2014-10-22 15:42:09 +0300484 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
Andy Lutomirski780bc792016-02-02 21:46:36 -0800485 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
Michael S. Tsirkin00e6f3d2014-10-22 15:42:09 +0300486 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
Rusty Russell13816c72013-03-20 15:37:09 +1030487 prev = i;
Michael S. Tsirkin00e6f3d2014-10-22 15:42:09 +0300488 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
Rusty Russell13816c72013-03-20 15:37:09 +1030489 }
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000490 }
Rusty Russell13816c72013-03-20 15:37:09 +1030491 for (; n < (out_sgs + in_sgs); n++) {
Rusty Russelleeebf9b2014-09-11 10:17:37 +0930492 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
Andy Lutomirski780bc792016-02-02 21:46:36 -0800493 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
494 if (vring_mapping_error(vq, addr))
495 goto unmap_release;
496
Michael S. Tsirkin00e6f3d2014-10-22 15:42:09 +0300497 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
Andy Lutomirski780bc792016-02-02 21:46:36 -0800498 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
Michael S. Tsirkin00e6f3d2014-10-22 15:42:09 +0300499 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
Rusty Russell13816c72013-03-20 15:37:09 +1030500 prev = i;
Michael S. Tsirkin00e6f3d2014-10-22 15:42:09 +0300501 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
Rusty Russell13816c72013-03-20 15:37:09 +1030502 }
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000503 }
504 /* Last one doesn't continue. */
Michael S. Tsirkin00e6f3d2014-10-22 15:42:09 +0300505 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000506
Andy Lutomirski780bc792016-02-02 21:46:36 -0800507 if (indirect) {
508 /* Now that the indirect table is filled in, map it. */
509 dma_addr_t addr = vring_map_single(
510 vq, desc, total_sg * sizeof(struct vring_desc),
511 DMA_TO_DEVICE);
512 if (vring_mapping_error(vq, addr))
513 goto unmap_release;
514
Tiwei Biee593bf92018-11-21 18:03:21 +0800515 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
516 VRING_DESC_F_INDIRECT);
517 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
518 addr);
Andy Lutomirski780bc792016-02-02 21:46:36 -0800519
Tiwei Biee593bf92018-11-21 18:03:21 +0800520 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
521 total_sg * sizeof(struct vring_desc));
Andy Lutomirski780bc792016-02-02 21:46:36 -0800522 }
523
524 /* We're using some buffers from the free list. */
525 vq->vq.num_free -= descs_used;
526
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000527 /* Update free pointer */
Rusty Russellb25bd252014-09-11 10:17:38 +0930528 if (indirect)
Tiwei Biee593bf92018-11-21 18:03:21 +0800529 vq->free_head = virtio16_to_cpu(_vq->vdev,
530 vq->split.vring.desc[head].next);
Rusty Russellb25bd252014-09-11 10:17:38 +0930531 else
532 vq->free_head = i;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000533
Andy Lutomirski780bc792016-02-02 21:46:36 -0800534 /* Store token and indirect buffer state. */
Tiwei Biecbeedb72018-11-21 18:03:24 +0800535 vq->split.desc_state[head].data = data;
Andy Lutomirski780bc792016-02-02 21:46:36 -0800536 if (indirect)
Tiwei Biecbeedb72018-11-21 18:03:24 +0800537 vq->split.desc_state[head].indir_desc = desc;
Jason Wang87646a32017-07-19 16:54:45 +0800538 else
Tiwei Biecbeedb72018-11-21 18:03:24 +0800539 vq->split.desc_state[head].indir_desc = ctx;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000540
541 /* Put entry in available array (but don't update avail->idx until they
Rusty Russell3b720b82012-01-12 15:44:43 +1030542 * do sync). */
Tiwei Biee593bf92018-11-21 18:03:21 +0800543 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
544 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000545
Rusty Russellee7cd892012-01-12 15:44:43 +1030546 /* Descriptors and available array need to be set before we expose the
547 * new available array entries. */
Rusty Russella9a0fef2013-03-18 13:22:19 +1030548 virtio_wmb(vq->weak_barriers);
Tiwei Biee593bf92018-11-21 18:03:21 +0800549 vq->split.avail_idx_shadow++;
550 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
551 vq->split.avail_idx_shadow);
Rusty Russellee7cd892012-01-12 15:44:43 +1030552 vq->num_added++;
553
Tetsuo Handa5e05bf52015-02-11 15:01:13 +1030554 pr_debug("Added buffer head %i to %p\n", head, vq);
555 END_USE(vq);
556
Rusty Russellee7cd892012-01-12 15:44:43 +1030557 /* This is very unlikely, but theoretically possible. Kick
558 * just in case. */
559 if (unlikely(vq->num_added == (1 << 16) - 1))
560 virtqueue_kick(_vq);
561
Rusty Russell98e8c6b2012-10-16 23:56:15 +1030562 return 0;
Andy Lutomirski780bc792016-02-02 21:46:36 -0800563
564unmap_release:
565 err_idx = i;
566 i = head;
567
568 for (n = 0; n < total_sg; n++) {
569 if (i == err_idx)
570 break;
Tiwei Bie138fd252018-11-21 18:03:19 +0800571 vring_unmap_one_split(vq, &desc[i]);
Tiwei Biee593bf92018-11-21 18:03:21 +0800572 i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
Andy Lutomirski780bc792016-02-02 21:46:36 -0800573 }
574
Andy Lutomirski780bc792016-02-02 21:46:36 -0800575 if (indirect)
576 kfree(desc);
577
Michael S. Tsirkin3cc36f62016-08-03 07:18:51 +0300578 END_USE(vq);
Andy Lutomirski780bc792016-02-02 21:46:36 -0800579 return -EIO;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000580}
Rusty Russell13816c72013-03-20 15:37:09 +1030581
Tiwei Bie138fd252018-11-21 18:03:19 +0800582static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000583{
584 struct vring_virtqueue *vq = to_vvq(_vq);
Michael S. Tsirkina5c262c2011-05-20 02:10:44 +0300585 u16 new, old;
Rusty Russell41f03772012-01-12 15:44:43 +1030586 bool needs_kick;
587
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000588 START_USE(vq);
Jason Wanga72caae2012-01-20 16:17:08 +0800589 /* We need to expose available array entries before checking avail
590 * event. */
Rusty Russella9a0fef2013-03-18 13:22:19 +1030591 virtio_mb(vq->weak_barriers);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000592
Tiwei Biee593bf92018-11-21 18:03:21 +0800593 old = vq->split.avail_idx_shadow - vq->num_added;
594 new = vq->split.avail_idx_shadow;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000595 vq->num_added = 0;
596
Tiwei Bie4d6a1052018-11-21 18:03:22 +0800597 LAST_ADD_TIME_CHECK(vq);
598 LAST_ADD_TIME_INVALID(vq);
Rusty Russelle93300b2012-01-12 15:44:43 +1030599
Rusty Russell41f03772012-01-12 15:44:43 +1030600 if (vq->event) {
Tiwei Biee593bf92018-11-21 18:03:21 +0800601 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
602 vring_avail_event(&vq->split.vring)),
Rusty Russell41f03772012-01-12 15:44:43 +1030603 new, old);
604 } else {
Tiwei Biee593bf92018-11-21 18:03:21 +0800605 needs_kick = !(vq->split.vring.used->flags &
606 cpu_to_virtio16(_vq->vdev,
607 VRING_USED_F_NO_NOTIFY));
Rusty Russell41f03772012-01-12 15:44:43 +1030608 }
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000609 END_USE(vq);
Rusty Russell41f03772012-01-12 15:44:43 +1030610 return needs_kick;
611}
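
/*
 * Worked example for the event-index branch above (numbers are made up):
 * suppose the driver had published entries up to old = 10, has advanced
 * avail_idx_shadow to new = 14, and the device last wrote
 * avail_event = 12, i.e. "kick me once entry 12 has been exposed".  Then
 * vring_need_event(12, 14, 10) compares
 *
 *	(u16)(new - event_idx - 1) = 1  <  (u16)(new - old) = 4
 *
 * which is true, so a kick is due.  The unsigned 16-bit arithmetic keeps
 * the comparison valid across index wrap-around.
 */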
Tiwei Bie138fd252018-11-21 18:03:19 +0800612
Tiwei Bie138fd252018-11-21 18:03:19 +0800613static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
614 void **ctx)
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000615{
Andy Lutomirski780bc792016-02-02 21:46:36 -0800616 unsigned int i, j;
Gongleic60923c2016-11-22 13:51:50 +0800617 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000618
619 /* Clear data ptr. */
Tiwei Biecbeedb72018-11-21 18:03:24 +0800620 vq->split.desc_state[head].data = NULL;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000621
Andy Lutomirski780bc792016-02-02 21:46:36 -0800622 /* Put back on free list: unmap first-level descriptors and find end */
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000623 i = head;
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +0100624
Tiwei Biee593bf92018-11-21 18:03:21 +0800625 while (vq->split.vring.desc[i].flags & nextflag) {
626 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
627 i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
Rusty Russell06ca2872012-10-16 23:56:14 +1030628 vq->vq.num_free++;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000629 }
630
Tiwei Biee593bf92018-11-21 18:03:21 +0800631 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
632 vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
633 vq->free_head);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000634 vq->free_head = head;
Andy Lutomirski780bc792016-02-02 21:46:36 -0800635
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000636 /* Plus final descriptor */
Rusty Russell06ca2872012-10-16 23:56:14 +1030637 vq->vq.num_free++;
Andy Lutomirski780bc792016-02-02 21:46:36 -0800638
Michael S. Tsirkin5a08b042017-02-07 06:15:13 +0200639 if (vq->indirect) {
Tiwei Biecbeedb72018-11-21 18:03:24 +0800640 struct vring_desc *indir_desc =
641 vq->split.desc_state[head].indir_desc;
Michael S. Tsirkin5a08b042017-02-07 06:15:13 +0200642 u32 len;
643
644 /* Free the indirect table, if any, now that it's unmapped. */
645 if (!indir_desc)
646 return;
647
Tiwei Biee593bf92018-11-21 18:03:21 +0800648 len = virtio32_to_cpu(vq->vq.vdev,
649 vq->split.vring.desc[head].len);
Andy Lutomirski780bc792016-02-02 21:46:36 -0800650
Tiwei Biee593bf92018-11-21 18:03:21 +0800651 BUG_ON(!(vq->split.vring.desc[head].flags &
Andy Lutomirski780bc792016-02-02 21:46:36 -0800652 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
653 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
654
655 for (j = 0; j < len / sizeof(struct vring_desc); j++)
Tiwei Bie138fd252018-11-21 18:03:19 +0800656 vring_unmap_one_split(vq, &indir_desc[j]);
Andy Lutomirski780bc792016-02-02 21:46:36 -0800657
Michael S. Tsirkin5a08b042017-02-07 06:15:13 +0200658 kfree(indir_desc);
Tiwei Biecbeedb72018-11-21 18:03:24 +0800659 vq->split.desc_state[head].indir_desc = NULL;
Michael S. Tsirkin5a08b042017-02-07 06:15:13 +0200660 } else if (ctx) {
Tiwei Biecbeedb72018-11-21 18:03:24 +0800661 *ctx = vq->split.desc_state[head].indir_desc;
Andy Lutomirski780bc792016-02-02 21:46:36 -0800662 }
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000663}
664
Tiwei Bie138fd252018-11-21 18:03:19 +0800665static inline bool more_used_split(const struct vring_virtqueue *vq)
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000666{
Tiwei Biee593bf92018-11-21 18:03:21 +0800667 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
668 vq->split.vring.used->idx);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000669}
670
Tiwei Bie138fd252018-11-21 18:03:19 +0800671static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
672 unsigned int *len,
673 void **ctx)
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000674{
675 struct vring_virtqueue *vq = to_vvq(_vq);
676 void *ret;
677 unsigned int i;
Rusty Russell3b720b82012-01-12 15:44:43 +1030678 u16 last_used;
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000679
680 START_USE(vq);
681
Rusty Russell5ef82752008-05-02 21:50:43 -0500682 if (unlikely(vq->broken)) {
683 END_USE(vq);
684 return NULL;
685 }
686
Tiwei Bie138fd252018-11-21 18:03:19 +0800687 if (!more_used_split(vq)) {
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000688 pr_debug("No more buffers in queue\n");
689 END_USE(vq);
690 return NULL;
691 }
692
Michael S. Tsirkin2d61ba92009-10-25 15:28:53 +0200693 /* Only get used array entries after they have been exposed by host. */
Rusty Russella9a0fef2013-03-18 13:22:19 +1030694 virtio_rmb(vq->weak_barriers);
Michael S. Tsirkin2d61ba92009-10-25 15:28:53 +0200695
Tiwei Biee593bf92018-11-21 18:03:21 +0800696 last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
697 i = virtio32_to_cpu(_vq->vdev,
698 vq->split.vring.used->ring[last_used].id);
699 *len = virtio32_to_cpu(_vq->vdev,
700 vq->split.vring.used->ring[last_used].len);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000701
Tiwei Biee593bf92018-11-21 18:03:21 +0800702 if (unlikely(i >= vq->split.vring.num)) {
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000703 BAD_RING(vq, "id %u out of range\n", i);
704 return NULL;
705 }
Tiwei Biecbeedb72018-11-21 18:03:24 +0800706 if (unlikely(!vq->split.desc_state[i].data)) {
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000707 BAD_RING(vq, "id %u is not a head!\n", i);
708 return NULL;
709 }
710
Tiwei Bie138fd252018-11-21 18:03:19 +0800711 /* detach_buf_split clears data, so grab it now. */
Tiwei Biecbeedb72018-11-21 18:03:24 +0800712 ret = vq->split.desc_state[i].data;
Tiwei Bie138fd252018-11-21 18:03:19 +0800713 detach_buf_split(vq, i, ctx);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000714 vq->last_used_idx++;
Michael S. Tsirkina5c262c2011-05-20 02:10:44 +0300715 /* If we expect an interrupt for the next entry, tell host
716 * by writing event index and flush out the write before
717 * the read in the next get_buf call. */
Tiwei Biee593bf92018-11-21 18:03:21 +0800718 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
Michael S. Tsirkin788e5b32015-12-17 12:20:39 +0200719 virtio_store_mb(vq->weak_barriers,
Tiwei Biee593bf92018-11-21 18:03:21 +0800720 &vring_used_event(&vq->split.vring),
Michael S. Tsirkin788e5b32015-12-17 12:20:39 +0200721 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
Michael S. Tsirkina5c262c2011-05-20 02:10:44 +0300722
Tiwei Bie4d6a1052018-11-21 18:03:22 +0800723 LAST_ADD_TIME_INVALID(vq);
Rusty Russelle93300b2012-01-12 15:44:43 +1030724
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000725 END_USE(vq);
726 return ret;
727}
Tiwei Bie138fd252018-11-21 18:03:19 +0800728
Tiwei Bie138fd252018-11-21 18:03:19 +0800729static void virtqueue_disable_cb_split(struct virtqueue *_vq)
730{
731 struct vring_virtqueue *vq = to_vvq(_vq);
732
Tiwei Biee593bf92018-11-21 18:03:21 +0800733 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
734 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
Tiwei Bie138fd252018-11-21 18:03:19 +0800735 if (!vq->event)
Tiwei Biee593bf92018-11-21 18:03:21 +0800736 vq->split.vring.avail->flags =
737 cpu_to_virtio16(_vq->vdev,
738 vq->split.avail_flags_shadow);
Tiwei Bie138fd252018-11-21 18:03:19 +0800739 }
740}
741
Tiwei Bie138fd252018-11-21 18:03:19 +0800742static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
Michael S. Tsirkincc229882013-07-09 13:19:18 +0300743{
744 struct vring_virtqueue *vq = to_vvq(_vq);
745 u16 last_used_idx;
746
747 START_USE(vq);
748
749 /* We optimistically turn back on interrupts, then check if there was
750 * more to do. */
751 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
752 * either clear the flags bit or point the event index at the next
753 * entry. Always do both to keep code simple. */
Tiwei Biee593bf92018-11-21 18:03:21 +0800754 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
755 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
Ladi Prosek0ea1e4a2016-08-31 14:00:04 +0200756 if (!vq->event)
Tiwei Biee593bf92018-11-21 18:03:21 +0800757 vq->split.vring.avail->flags =
758 cpu_to_virtio16(_vq->vdev,
759 vq->split.avail_flags_shadow);
Venkatesh Srinivasf277ec42015-11-10 16:21:07 -0800760 }
Tiwei Biee593bf92018-11-21 18:03:21 +0800761 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
762 last_used_idx = vq->last_used_idx);
Michael S. Tsirkincc229882013-07-09 13:19:18 +0300763 END_USE(vq);
764 return last_used_idx;
765}
Tiwei Bie138fd252018-11-21 18:03:19 +0800766
Tiwei Bie138fd252018-11-21 18:03:19 +0800767static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
768{
769 struct vring_virtqueue *vq = to_vvq(_vq);
770
771 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
Tiwei Biee593bf92018-11-21 18:03:21 +0800772 vq->split.vring.used->idx);
Tiwei Bie138fd252018-11-21 18:03:19 +0800773}
774
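/*
 * Illustrative sketch of how the two helpers above pair up, modelled on
 * the exported virtqueue_enable_cb()/virtqueue_poll() wrappers assumed
 * to live later in this file (the function name here is made up):
 * re-enable callbacks, then re-check whether a buffer raced in while
 * callbacks were still off; if one did, the caller typically disables
 * callbacks again and keeps polling.
 */
static bool example_enable_cb_split(struct virtqueue *_vq)
{
	unsigned int last_used_idx = virtqueue_enable_cb_prepare_split(_vq);

	/* Pair the store to used_event with the later read of used->idx. */
	virtio_mb(to_vvq(_vq)->weak_barriers);

	return !virtqueue_poll_split(_vq, last_used_idx);
}
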
Tiwei Bie138fd252018-11-21 18:03:19 +0800775static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
Michael S. Tsirkin7ab358c2011-05-20 02:11:14 +0300776{
777 struct vring_virtqueue *vq = to_vvq(_vq);
778 u16 bufs;
779
780 START_USE(vq);
781
782 /* We optimistically turn back on interrupts, then check if there was
783 * more to do. */
 784	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
785 * either clear the flags bit or point the event index at the next
Ladi Prosek0ea1e4a2016-08-31 14:00:04 +0200786 * entry. Always update the event index to keep code simple. */
Tiwei Biee593bf92018-11-21 18:03:21 +0800787 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
788 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
Ladi Prosek0ea1e4a2016-08-31 14:00:04 +0200789 if (!vq->event)
Tiwei Biee593bf92018-11-21 18:03:21 +0800790 vq->split.vring.avail->flags =
791 cpu_to_virtio16(_vq->vdev,
792 vq->split.avail_flags_shadow);
Venkatesh Srinivasf277ec42015-11-10 16:21:07 -0800793 }
Michael S. Tsirkin7ab358c2011-05-20 02:11:14 +0300794 /* TODO: tune this threshold */
Tiwei Biee593bf92018-11-21 18:03:21 +0800795 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
Michael S. Tsirkin788e5b32015-12-17 12:20:39 +0200796
797 virtio_store_mb(vq->weak_barriers,
Tiwei Biee593bf92018-11-21 18:03:21 +0800798 &vring_used_event(&vq->split.vring),
Michael S. Tsirkin788e5b32015-12-17 12:20:39 +0200799 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
800
Tiwei Biee593bf92018-11-21 18:03:21 +0800801 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
802 - vq->last_used_idx) > bufs)) {
Michael S. Tsirkin7ab358c2011-05-20 02:11:14 +0300803 END_USE(vq);
804 return false;
805 }
806
807 END_USE(vq);
808 return true;
809}
Michael S. Tsirkin7ab358c2011-05-20 02:11:14 +0300810
Tiwei Bie138fd252018-11-21 18:03:19 +0800811static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
Shirley Mac021eac2010-01-18 19:15:23 +0530812{
813 struct vring_virtqueue *vq = to_vvq(_vq);
814 unsigned int i;
815 void *buf;
816
817 START_USE(vq);
818
Tiwei Biee593bf92018-11-21 18:03:21 +0800819 for (i = 0; i < vq->split.vring.num; i++) {
Tiwei Biecbeedb72018-11-21 18:03:24 +0800820 if (!vq->split.desc_state[i].data)
Shirley Mac021eac2010-01-18 19:15:23 +0530821 continue;
Tiwei Bie138fd252018-11-21 18:03:19 +0800822 /* detach_buf_split clears data, so grab it now. */
Tiwei Biecbeedb72018-11-21 18:03:24 +0800823 buf = vq->split.desc_state[i].data;
Tiwei Bie138fd252018-11-21 18:03:19 +0800824 detach_buf_split(vq, i, NULL);
Tiwei Biee593bf92018-11-21 18:03:21 +0800825 vq->split.avail_idx_shadow--;
826 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
827 vq->split.avail_idx_shadow);
Shirley Mac021eac2010-01-18 19:15:23 +0530828 END_USE(vq);
829 return buf;
830 }
831 /* That should have freed everything. */
Tiwei Biee593bf92018-11-21 18:03:21 +0800832 BUG_ON(vq->vq.num_free != vq->split.vring.num);
Shirley Mac021eac2010-01-18 19:15:23 +0530833
834 END_USE(vq);
835 return NULL;
836}
Tiwei Bie138fd252018-11-21 18:03:19 +0800837
Tiwei Bied79dca72018-11-21 18:03:25 +0800838static struct virtqueue *vring_create_virtqueue_split(
839 unsigned int index,
840 unsigned int num,
841 unsigned int vring_align,
842 struct virtio_device *vdev,
843 bool weak_barriers,
844 bool may_reduce_num,
845 bool context,
846 bool (*notify)(struct virtqueue *),
847 void (*callback)(struct virtqueue *),
848 const char *name)
849{
850 struct virtqueue *vq;
851 void *queue = NULL;
852 dma_addr_t dma_addr;
853 size_t queue_size_in_bytes;
854 struct vring vring;
855
856 /* We assume num is a power of 2. */
857 if (num & (num - 1)) {
858 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
859 return NULL;
860 }
861
862 /* TODO: allocate each queue chunk individually */
863 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
864 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
865 &dma_addr,
866 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
867 if (queue)
868 break;
869 }
870
871 if (!num)
872 return NULL;
873
874 if (!queue) {
875 /* Try to get a single page. You are my only hope! */
876 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
877 &dma_addr, GFP_KERNEL|__GFP_ZERO);
878 }
879 if (!queue)
880 return NULL;
881
882 queue_size_in_bytes = vring_size(num, vring_align);
883 vring_init(&vring, num, queue, vring_align);
884
885 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
886 notify, callback, name);
887 if (!vq) {
888 vring_free_queue(vdev, queue_size_in_bytes, queue,
889 dma_addr);
890 return NULL;
891 }
892
893 to_vvq(vq)->split.queue_dma_addr = dma_addr;
894 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
895 to_vvq(vq)->we_own_ring = true;
896
897 return vq;
898}
899
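/*
 * Worked example for the sizing loop above, assuming the standard
 * vring_size() layout (16-byte descriptors, 2-byte avail entries,
 * 8-byte used entries): for num = 256 and vring_align = 4096,
 *
 *	round_up(16 * 256 + 2 * (3 + 256), 4096) + 2 * 3 + 8 * 256
 *	= 8192 + 2054 = 10246 bytes,
 *
 * which is well above one page, so the loop first attempts a contiguous
 * allocation of that size and only halves num when the allocation fails.
 */
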
Tiwei Biee6f633e2018-11-21 18:03:20 +0800900
901/*
Tiwei Bie1ce9e602018-11-21 18:03:27 +0800902 * Packed ring specific functions - *_packed().
903 */
904
905static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
906 struct vring_desc_extra_packed *state)
907{
908 u16 flags;
909
910 if (!vq->use_dma_api)
911 return;
912
913 flags = state->flags;
914
915 if (flags & VRING_DESC_F_INDIRECT) {
916 dma_unmap_single(vring_dma_dev(vq),
917 state->addr, state->len,
918 (flags & VRING_DESC_F_WRITE) ?
919 DMA_FROM_DEVICE : DMA_TO_DEVICE);
920 } else {
921 dma_unmap_page(vring_dma_dev(vq),
922 state->addr, state->len,
923 (flags & VRING_DESC_F_WRITE) ?
924 DMA_FROM_DEVICE : DMA_TO_DEVICE);
925 }
926}
927
928static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
929 struct vring_packed_desc *desc)
930{
931 u16 flags;
932
933 if (!vq->use_dma_api)
934 return;
935
936 flags = le16_to_cpu(desc->flags);
937
938 if (flags & VRING_DESC_F_INDIRECT) {
939 dma_unmap_single(vring_dma_dev(vq),
940 le64_to_cpu(desc->addr),
941 le32_to_cpu(desc->len),
942 (flags & VRING_DESC_F_WRITE) ?
943 DMA_FROM_DEVICE : DMA_TO_DEVICE);
944 } else {
945 dma_unmap_page(vring_dma_dev(vq),
946 le64_to_cpu(desc->addr),
947 le32_to_cpu(desc->len),
948 (flags & VRING_DESC_F_WRITE) ?
949 DMA_FROM_DEVICE : DMA_TO_DEVICE);
950 }
951}
952
953static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
954 gfp_t gfp)
955{
956 struct vring_packed_desc *desc;
957
958 /*
959 * We require lowmem mappings for the descriptors because
960 * otherwise virt_to_phys will give us bogus addresses in the
961 * virtqueue.
962 */
963 gfp &= ~__GFP_HIGHMEM;
964
965 desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
966
967 return desc;
968}
969
970static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
971 struct scatterlist *sgs[],
972 unsigned int total_sg,
973 unsigned int out_sgs,
974 unsigned int in_sgs,
975 void *data,
976 gfp_t gfp)
977{
978 struct vring_packed_desc *desc;
979 struct scatterlist *sg;
980 unsigned int i, n, err_idx;
981 u16 head, id;
982 dma_addr_t addr;
983
984 head = vq->packed.next_avail_idx;
985 desc = alloc_indirect_packed(total_sg, gfp);
986
987 if (unlikely(vq->vq.num_free < 1)) {
988 pr_debug("Can't add buf len 1 - avail = 0\n");
989 END_USE(vq);
990 return -ENOSPC;
991 }
992
993 i = 0;
994 id = vq->free_head;
995 BUG_ON(id == vq->packed.vring.num);
996
997 for (n = 0; n < out_sgs + in_sgs; n++) {
998 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
999 addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1000 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1001 if (vring_mapping_error(vq, addr))
1002 goto unmap_release;
1003
1004 desc[i].flags = cpu_to_le16(n < out_sgs ?
1005 0 : VRING_DESC_F_WRITE);
1006 desc[i].addr = cpu_to_le64(addr);
1007 desc[i].len = cpu_to_le32(sg->length);
1008 i++;
1009 }
1010 }
1011
1012 /* Now that the indirect table is filled in, map it. */
1013 addr = vring_map_single(vq, desc,
1014 total_sg * sizeof(struct vring_packed_desc),
1015 DMA_TO_DEVICE);
1016 if (vring_mapping_error(vq, addr))
1017 goto unmap_release;
1018
1019 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1020 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1021 sizeof(struct vring_packed_desc));
1022 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1023
1024 if (vq->use_dma_api) {
1025 vq->packed.desc_extra[id].addr = addr;
1026 vq->packed.desc_extra[id].len = total_sg *
1027 sizeof(struct vring_packed_desc);
1028 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
1029 vq->packed.avail_used_flags;
1030 }
1031
1032 /*
1033 * A driver MUST NOT make the first descriptor in the list
1034 * available before all subsequent descriptors comprising
1035 * the list are made available.
1036 */
1037 virtio_wmb(vq->weak_barriers);
1038 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
1039 vq->packed.avail_used_flags);
1040
1041 /* We're using some buffers from the free list. */
1042 vq->vq.num_free -= 1;
1043
1044 /* Update free pointer */
1045 n = head + 1;
1046 if (n >= vq->packed.vring.num) {
1047 n = 0;
1048 vq->packed.avail_wrap_counter ^= 1;
1049 vq->packed.avail_used_flags ^=
1050 1 << VRING_PACKED_DESC_F_AVAIL |
1051 1 << VRING_PACKED_DESC_F_USED;
1052 }
1053 vq->packed.next_avail_idx = n;
1054 vq->free_head = vq->packed.desc_state[id].next;
1055
1056 /* Store token and indirect buffer state. */
1057 vq->packed.desc_state[id].num = 1;
1058 vq->packed.desc_state[id].data = data;
1059 vq->packed.desc_state[id].indir_desc = desc;
1060 vq->packed.desc_state[id].last = id;
1061
1062 vq->num_added += 1;
1063
1064 pr_debug("Added buffer head %i to %p\n", head, vq);
1065 END_USE(vq);
1066
1067 return 0;
1068
1069unmap_release:
1070 err_idx = i;
1071
1072 for (i = 0; i < err_idx; i++)
1073 vring_unmap_desc_packed(vq, &desc[i]);
1074
1075 kfree(desc);
1076
1077 END_USE(vq);
1078 return -EIO;
1079}
1080
1081static inline int virtqueue_add_packed(struct virtqueue *_vq,
1082 struct scatterlist *sgs[],
1083 unsigned int total_sg,
1084 unsigned int out_sgs,
1085 unsigned int in_sgs,
1086 void *data,
1087 void *ctx,
1088 gfp_t gfp)
1089{
1090 struct vring_virtqueue *vq = to_vvq(_vq);
1091 struct vring_packed_desc *desc;
1092 struct scatterlist *sg;
1093 unsigned int i, n, c, descs_used, err_idx;
1094 __le16 uninitialized_var(head_flags), flags;
1095 u16 head, id, uninitialized_var(prev), curr, avail_used_flags;
1096
1097 START_USE(vq);
1098
1099 BUG_ON(data == NULL);
1100 BUG_ON(ctx && vq->indirect);
1101
1102 if (unlikely(vq->broken)) {
1103 END_USE(vq);
1104 return -EIO;
1105 }
1106
1107 LAST_ADD_TIME_UPDATE(vq);
1108
1109 BUG_ON(total_sg == 0);
1110
1111 if (virtqueue_use_indirect(_vq, total_sg))
1112 return virtqueue_add_indirect_packed(vq, sgs, total_sg,
1113 out_sgs, in_sgs, data, gfp);
1114
1115 head = vq->packed.next_avail_idx;
1116 avail_used_flags = vq->packed.avail_used_flags;
1117
1118 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1119
1120 desc = vq->packed.vring.desc;
1121 i = head;
1122 descs_used = total_sg;
1123
1124 if (unlikely(vq->vq.num_free < descs_used)) {
1125 pr_debug("Can't add buf len %i - avail = %i\n",
1126 descs_used, vq->vq.num_free);
1127 END_USE(vq);
1128 return -ENOSPC;
1129 }
1130
1131 id = vq->free_head;
1132 BUG_ON(id == vq->packed.vring.num);
1133
1134 curr = id;
1135 c = 0;
1136 for (n = 0; n < out_sgs + in_sgs; n++) {
1137 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1138 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1139 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1140 if (vring_mapping_error(vq, addr))
1141 goto unmap_release;
1142
1143 flags = cpu_to_le16(vq->packed.avail_used_flags |
1144 (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
1145 (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
1146 if (i == head)
1147 head_flags = flags;
1148 else
1149 desc[i].flags = flags;
1150
1151 desc[i].addr = cpu_to_le64(addr);
1152 desc[i].len = cpu_to_le32(sg->length);
1153 desc[i].id = cpu_to_le16(id);
1154
1155 if (unlikely(vq->use_dma_api)) {
1156 vq->packed.desc_extra[curr].addr = addr;
1157 vq->packed.desc_extra[curr].len = sg->length;
1158 vq->packed.desc_extra[curr].flags =
1159 le16_to_cpu(flags);
1160 }
1161 prev = curr;
1162 curr = vq->packed.desc_state[curr].next;
1163
1164 if ((unlikely(++i >= vq->packed.vring.num))) {
1165 i = 0;
1166 vq->packed.avail_used_flags ^=
1167 1 << VRING_PACKED_DESC_F_AVAIL |
1168 1 << VRING_PACKED_DESC_F_USED;
1169 }
1170 }
1171 }
1172
1173 if (i < head)
1174 vq->packed.avail_wrap_counter ^= 1;
1175
1176 /* We're using some buffers from the free list. */
1177 vq->vq.num_free -= descs_used;
1178
1179 /* Update free pointer */
1180 vq->packed.next_avail_idx = i;
1181 vq->free_head = curr;
1182
1183 /* Store token. */
1184 vq->packed.desc_state[id].num = descs_used;
1185 vq->packed.desc_state[id].data = data;
1186 vq->packed.desc_state[id].indir_desc = ctx;
1187 vq->packed.desc_state[id].last = prev;
1188
1189 /*
1190 * A driver MUST NOT make the first descriptor in the list
1191 * available before all subsequent descriptors comprising
1192 * the list are made available.
1193 */
1194 virtio_wmb(vq->weak_barriers);
1195 vq->packed.vring.desc[head].flags = head_flags;
1196 vq->num_added += descs_used;
1197
1198 pr_debug("Added buffer head %i to %p\n", head, vq);
1199 END_USE(vq);
1200
1201 return 0;
1202
1203unmap_release:
1204 err_idx = i;
1205 i = head;
1206
1207 vq->packed.avail_used_flags = avail_used_flags;
1208
1209 for (n = 0; n < total_sg; n++) {
1210 if (i == err_idx)
1211 break;
1212 vring_unmap_desc_packed(vq, &desc[i]);
1213 i++;
1214 if (i >= vq->packed.vring.num)
1215 i = 0;
1216 }
1217
1218 END_USE(vq);
1219 return -EIO;
1220}
1221
1222static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
1223{
1224 struct vring_virtqueue *vq = to_vvq(_vq);
Tiwei Bief51f9822018-11-21 18:03:28 +08001225 u16 new, old, off_wrap, flags, wrap_counter, event_idx;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001226 bool needs_kick;
1227 union {
1228 struct {
1229 __le16 off_wrap;
1230 __le16 flags;
1231 };
1232 u32 u32;
1233 } snapshot;
1234
1235 START_USE(vq);
1236
1237 /*
1238 * We need to expose the new flags value before checking notification
1239 * suppressions.
1240 */
1241 virtio_mb(vq->weak_barriers);
1242
Tiwei Bief51f9822018-11-21 18:03:28 +08001243 old = vq->packed.next_avail_idx - vq->num_added;
1244 new = vq->packed.next_avail_idx;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001245 vq->num_added = 0;
1246
1247 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1248 flags = le16_to_cpu(snapshot.flags);
1249
1250 LAST_ADD_TIME_CHECK(vq);
1251 LAST_ADD_TIME_INVALID(vq);
1252
Tiwei Bief51f9822018-11-21 18:03:28 +08001253 if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
1254 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1255 goto out;
1256 }
1257
1258 off_wrap = le16_to_cpu(snapshot.off_wrap);
1259
1260 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1261 event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1262 if (wrap_counter != vq->packed.avail_wrap_counter)
1263 event_idx -= vq->packed.vring.num;
1264
1265 needs_kick = vring_need_event(event_idx, new, old);
1266out:
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001267 END_USE(vq);
1268 return needs_kick;
1269}
1270
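/*
 * Note on the off_wrap decoding above: the device publishes its event as
 * a 15-bit offset plus a wrap-counter bit (VRING_PACKED_EVENT_F_WRAP_CTR).
 * For example, off_wrap = 0x8005 decodes to wrap_counter = 1 and
 * event_idx = 5.  When that wrap counter differs from the driver's
 * current avail_wrap_counter, the event belongs to the previous lap of
 * the ring, so event_idx is biased down by vring.num before it is fed to
 * vring_need_event() alongside the old/new next_avail_idx positions.
 */
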
1271static void detach_buf_packed(struct vring_virtqueue *vq,
1272 unsigned int id, void **ctx)
1273{
1274 struct vring_desc_state_packed *state = NULL;
1275 struct vring_packed_desc *desc;
1276 unsigned int i, curr;
1277
1278 state = &vq->packed.desc_state[id];
1279
1280 /* Clear data ptr. */
1281 state->data = NULL;
1282
1283 vq->packed.desc_state[state->last].next = vq->free_head;
1284 vq->free_head = id;
1285 vq->vq.num_free += state->num;
1286
1287 if (unlikely(vq->use_dma_api)) {
1288 curr = id;
1289 for (i = 0; i < state->num; i++) {
1290 vring_unmap_state_packed(vq,
1291 &vq->packed.desc_extra[curr]);
1292 curr = vq->packed.desc_state[curr].next;
1293 }
1294 }
1295
1296 if (vq->indirect) {
1297 u32 len;
1298
1299 /* Free the indirect table, if any, now that it's unmapped. */
1300 desc = state->indir_desc;
1301 if (!desc)
1302 return;
1303
1304 if (vq->use_dma_api) {
1305 len = vq->packed.desc_extra[id].len;
1306 for (i = 0; i < len / sizeof(struct vring_packed_desc);
1307 i++)
1308 vring_unmap_desc_packed(vq, &desc[i]);
1309 }
1310 kfree(desc);
1311 state->indir_desc = NULL;
1312 } else if (ctx) {
1313 *ctx = state->indir_desc;
1314 }
1315}
1316
1317static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
1318 u16 idx, bool used_wrap_counter)
1319{
1320 bool avail, used;
1321 u16 flags;
1322
1323 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1324 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
1325 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
1326
1327 return avail == used && used == used_wrap_counter;
1328}
1329
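/*
 * Worked example of the check above: on the driver's first lap over the
 * ring it writes descriptors with AVAIL = 1, USED = 0 (the bits differ,
 * so the entry is not yet used).  The device marks an entry used by
 * setting both bits to its own wrap counter, here AVAIL = USED = 1,
 * which matches used_wrap_counter == 1.  After the device wraps around,
 * it writes AVAIL = USED = 0, matching the driver's by-then flipped
 * used_wrap_counter.
 */
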
1330static inline bool more_used_packed(const struct vring_virtqueue *vq)
1331{
1332 return is_used_desc_packed(vq, vq->last_used_idx,
1333 vq->packed.used_wrap_counter);
1334}
1335
1336static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1337 unsigned int *len,
1338 void **ctx)
1339{
1340 struct vring_virtqueue *vq = to_vvq(_vq);
1341 u16 last_used, id;
1342 void *ret;
1343
1344 START_USE(vq);
1345
1346 if (unlikely(vq->broken)) {
1347 END_USE(vq);
1348 return NULL;
1349 }
1350
1351 if (!more_used_packed(vq)) {
1352 pr_debug("No more buffers in queue\n");
1353 END_USE(vq);
1354 return NULL;
1355 }
1356
1357 /* Only get used elements after they have been exposed by host. */
1358 virtio_rmb(vq->weak_barriers);
1359
1360 last_used = vq->last_used_idx;
1361 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1362 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1363
1364 if (unlikely(id >= vq->packed.vring.num)) {
1365 BAD_RING(vq, "id %u out of range\n", id);
1366 return NULL;
1367 }
1368 if (unlikely(!vq->packed.desc_state[id].data)) {
1369 BAD_RING(vq, "id %u is not a head!\n", id);
1370 return NULL;
1371 }
1372
1373 /* detach_buf_packed clears data, so grab it now. */
1374 ret = vq->packed.desc_state[id].data;
1375 detach_buf_packed(vq, id, ctx);
1376
1377 vq->last_used_idx += vq->packed.desc_state[id].num;
1378 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
1379 vq->last_used_idx -= vq->packed.vring.num;
1380 vq->packed.used_wrap_counter ^= 1;
1381 }
1382
Tiwei Bief51f9822018-11-21 18:03:28 +08001383 /*
1384 * If we expect an interrupt for the next entry, tell host
1385 * by writing event index and flush out the write before
1386 * the read in the next get_buf call.
1387 */
1388 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1389 virtio_store_mb(vq->weak_barriers,
1390 &vq->packed.vring.driver->off_wrap,
1391 cpu_to_le16(vq->last_used_idx |
1392 (vq->packed.used_wrap_counter <<
1393 VRING_PACKED_EVENT_F_WRAP_CTR)));
1394
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001395 LAST_ADD_TIME_INVALID(vq);
1396
1397 END_USE(vq);
1398 return ret;
1399}
1400
1401static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1402{
1403 struct vring_virtqueue *vq = to_vvq(_vq);
1404
1405 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
1406 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1407 vq->packed.vring.driver->flags =
1408 cpu_to_le16(vq->packed.event_flags_shadow);
1409 }
1410}
1411
1412static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1413{
1414 struct vring_virtqueue *vq = to_vvq(_vq);
1415
1416 START_USE(vq);
1417
1418 /*
1419 * We optimistically turn back on interrupts, then check if there was
1420 * more to do.
1421 */
1422
Tiwei Bief51f9822018-11-21 18:03:28 +08001423 if (vq->event) {
1424 vq->packed.vring.driver->off_wrap =
1425 cpu_to_le16(vq->last_used_idx |
1426 (vq->packed.used_wrap_counter <<
1427 VRING_PACKED_EVENT_F_WRAP_CTR));
1428 /*
1429 * We need to update event offset and event wrap
1430 * counter first before updating event flags.
1431 */
1432 virtio_wmb(vq->weak_barriers);
1433 }
1434
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001435 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
Tiwei Bief51f9822018-11-21 18:03:28 +08001436 vq->packed.event_flags_shadow = vq->event ?
1437 VRING_PACKED_EVENT_FLAG_DESC :
1438 VRING_PACKED_EVENT_FLAG_ENABLE;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001439 vq->packed.vring.driver->flags =
1440 cpu_to_le16(vq->packed.event_flags_shadow);
1441 }
1442
1443 END_USE(vq);
1444 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
1445 VRING_PACKED_EVENT_F_WRAP_CTR);
1446}
1447
1448static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1449{
1450 struct vring_virtqueue *vq = to_vvq(_vq);
1451 bool wrap_counter;
1452 u16 used_idx;
1453
1454 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1455 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1456
1457 return is_used_desc_packed(vq, used_idx, wrap_counter);
1458}
1459
1460static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1461{
1462 struct vring_virtqueue *vq = to_vvq(_vq);
1463 u16 used_idx, wrap_counter;
Tiwei Bief51f9822018-11-21 18:03:28 +08001464 u16 bufs;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001465
1466 START_USE(vq);
1467
1468 /*
1469 * We optimistically turn back on interrupts, then check if there was
1470 * more to do.
1471 */
1472
Tiwei Bief51f9822018-11-21 18:03:28 +08001473 if (vq->event) {
1474 /* TODO: tune this threshold */
1475 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1476 wrap_counter = vq->packed.used_wrap_counter;
1477
1478 used_idx = vq->last_used_idx + bufs;
1479 if (used_idx >= vq->packed.vring.num) {
1480 used_idx -= vq->packed.vring.num;
1481 wrap_counter ^= 1;
1482 }
1483
1484 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1485 (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1486
1487 /*
1488 * We need to update event offset and event wrap
1489 * counter first before updating event flags.
1490 */
1491 virtio_wmb(vq->weak_barriers);
1492 } else {
1493 used_idx = vq->last_used_idx;
1494 wrap_counter = vq->packed.used_wrap_counter;
1495 }
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001496
1497 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
Tiwei Bief51f9822018-11-21 18:03:28 +08001498 vq->packed.event_flags_shadow = vq->event ?
1499 VRING_PACKED_EVENT_FLAG_DESC :
1500 VRING_PACKED_EVENT_FLAG_ENABLE;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001501 vq->packed.vring.driver->flags =
1502 cpu_to_le16(vq->packed.event_flags_shadow);
1503 }
1504
1505 /*
1506 * We need to update event suppression structure first
1507 * before re-checking for more used buffers.
1508 */
1509 virtio_mb(vq->weak_barriers);
1510
1511 if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
1512 END_USE(vq);
1513 return false;
1514 }
1515
1516 END_USE(vq);
1517 return true;
1518}
1519
1520static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1521{
1522 struct vring_virtqueue *vq = to_vvq(_vq);
1523 unsigned int i;
1524 void *buf;
1525
1526 START_USE(vq);
1527
1528 for (i = 0; i < vq->packed.vring.num; i++) {
1529 if (!vq->packed.desc_state[i].data)
1530 continue;
1531 /* detach_buf clears data, so grab it now. */
1532 buf = vq->packed.desc_state[i].data;
1533 detach_buf_packed(vq, i, NULL);
1534 END_USE(vq);
1535 return buf;
1536 }
1537 /* That should have freed everything. */
1538 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1539
1540 END_USE(vq);
1541 return NULL;
1542}
1543
1544static struct virtqueue *vring_create_virtqueue_packed(
1545 unsigned int index,
1546 unsigned int num,
1547 unsigned int vring_align,
1548 struct virtio_device *vdev,
1549 bool weak_barriers,
1550 bool may_reduce_num,
1551 bool context,
1552 bool (*notify)(struct virtqueue *),
1553 void (*callback)(struct virtqueue *),
1554 const char *name)
1555{
1556 struct vring_virtqueue *vq;
1557 struct vring_packed_desc *ring;
1558 struct vring_packed_desc_event *driver, *device;
1559 dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
1560 size_t ring_size_in_bytes, event_size_in_bytes;
1561 unsigned int i;
1562
1563 ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
1564
1565 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1566 &ring_dma_addr,
1567 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1568 if (!ring)
1569 goto err_ring;
1570
1571 event_size_in_bytes = sizeof(struct vring_packed_desc_event);
1572
1573 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1574 &driver_event_dma_addr,
1575 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1576 if (!driver)
1577 goto err_driver;
1578
1579 device = vring_alloc_queue(vdev, event_size_in_bytes,
1580 &device_event_dma_addr,
1581 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1582 if (!device)
1583 goto err_device;
1584
1585 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1586 if (!vq)
1587 goto err_vq;
1588
1589 vq->vq.callback = callback;
1590 vq->vq.vdev = vdev;
1591 vq->vq.name = name;
1592 vq->vq.num_free = num;
1593 vq->vq.index = index;
1594 vq->we_own_ring = true;
1595 vq->notify = notify;
1596 vq->weak_barriers = weak_barriers;
1597 vq->broken = false;
1598 vq->last_used_idx = 0;
1599 vq->num_added = 0;
1600 vq->packed_ring = true;
1601 vq->use_dma_api = vring_use_dma_api(vdev);
1602 list_add_tail(&vq->vq.list, &vdev->vqs);
1603#ifdef DEBUG
1604 vq->in_use = false;
1605 vq->last_add_time_valid = false;
1606#endif
1607
1608 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1609 !context;
1610 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1611
1612 vq->packed.ring_dma_addr = ring_dma_addr;
1613 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1614 vq->packed.device_event_dma_addr = device_event_dma_addr;
1615
1616 vq->packed.ring_size_in_bytes = ring_size_in_bytes;
1617 vq->packed.event_size_in_bytes = event_size_in_bytes;
1618
1619 vq->packed.vring.num = num;
1620 vq->packed.vring.desc = ring;
1621 vq->packed.vring.driver = driver;
1622 vq->packed.vring.device = device;
1623
1624 vq->packed.next_avail_idx = 0;
1625 vq->packed.avail_wrap_counter = 1;
1626 vq->packed.used_wrap_counter = 1;
1627 vq->packed.event_flags_shadow = 0;
1628 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
1629
1630 vq->packed.desc_state = kmalloc_array(num,
1631 sizeof(struct vring_desc_state_packed),
1632 GFP_KERNEL);
1633 if (!vq->packed.desc_state)
1634 goto err_desc_state;
1635
1636 memset(vq->packed.desc_state, 0,
1637 num * sizeof(struct vring_desc_state_packed));
1638
1639 /* Put everything in free lists. */
1640 vq->free_head = 0;
1641 for (i = 0; i < num-1; i++)
1642 vq->packed.desc_state[i].next = i + 1;
1643
1644 vq->packed.desc_extra = kmalloc_array(num,
1645 sizeof(struct vring_desc_extra_packed),
1646 GFP_KERNEL);
1647 if (!vq->packed.desc_extra)
1648 goto err_desc_extra;
1649
1650 memset(vq->packed.desc_extra, 0,
1651 num * sizeof(struct vring_desc_extra_packed));
1652
1653 /* No callback? Tell other side not to bother us. */
1654 if (!callback) {
1655 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1656 vq->packed.vring.driver->flags =
1657 cpu_to_le16(vq->packed.event_flags_shadow);
1658 }
1659
1660 return &vq->vq;
1661
1662err_desc_extra:
1663 kfree(vq->packed.desc_state);
1664err_desc_state:
1665 kfree(vq);
1666err_vq:
1667	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
1668err_device:
1669	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
1670err_driver:
1671 vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
1672err_ring:
1673 return NULL;
1674}
1675
1676
1677/*
Tiwei Biee6f633e2018-11-21 18:03:20 +08001678 * Generic functions and exported symbols.
1679 */
1680
1681static inline int virtqueue_add(struct virtqueue *_vq,
1682 struct scatterlist *sgs[],
1683 unsigned int total_sg,
1684 unsigned int out_sgs,
1685 unsigned int in_sgs,
1686 void *data,
1687 void *ctx,
1688 gfp_t gfp)
1689{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001690 struct vring_virtqueue *vq = to_vvq(_vq);
1691
1692 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
1693 out_sgs, in_sgs, data, ctx, gfp) :
1694 virtqueue_add_split(_vq, sgs, total_sg,
1695 out_sgs, in_sgs, data, ctx, gfp);
Tiwei Biee6f633e2018-11-21 18:03:20 +08001696}
1697
1698/**
1699 * virtqueue_add_sgs - expose buffers to other end
1700 * @vq: the struct virtqueue we're talking about.
1701 * @sgs: array of terminated scatterlists.
1702 * @out_sgs: the number of scatterlists readable by other side
1703 * @in_sgs: the number of scatterlists which are writable (after readable ones)
1704 * @data: the token identifying the buffer.
1705 * @gfp: how to do memory allocations (if necessary).
1706 *
1707 * Caller must ensure we don't call this with other virtqueue operations
1708 * at the same time (except where noted).
1709 *
1710 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1711 */
1712int virtqueue_add_sgs(struct virtqueue *_vq,
1713 struct scatterlist *sgs[],
1714 unsigned int out_sgs,
1715 unsigned int in_sgs,
1716 void *data,
1717 gfp_t gfp)
1718{
1719 unsigned int i, total_sg = 0;
1720
1721 /* Count them first. */
1722 for (i = 0; i < out_sgs + in_sgs; i++) {
1723 struct scatterlist *sg;
1724
1725 for (sg = sgs[i]; sg; sg = sg_next(sg))
1726 total_sg++;
1727 }
1728 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1729 data, NULL, gfp);
1730}
1731EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
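/*
 * Illustrative sketch (not part of this file): a driver queuing one request
 * with a device-readable header and a device-writable status byte, in the
 * style of virtio_blk.  "req", "req->hdr" and "req->status" are hypothetical
 * driver-side names; error handling is reduced to returning the error.
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sgs[0] = &hdr;
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[1] = &status;
 *
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (err)
 *		return err;
 */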
1732
1733/**
1734 * virtqueue_add_outbuf - expose output buffers to other end
1735 * @vq: the struct virtqueue we're talking about.
1736 * @sg: scatterlist (must be well-formed and terminated!)
1737 * @num: the number of entries in @sg readable by other side
1738 * @data: the token identifying the buffer.
1739 * @gfp: how to do memory allocations (if necessary).
1740 *
1741 * Caller must ensure we don't call this with other virtqueue operations
1742 * at the same time (except where noted).
1743 *
1744 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1745 */
1746int virtqueue_add_outbuf(struct virtqueue *vq,
1747 struct scatterlist *sg, unsigned int num,
1748 void *data,
1749 gfp_t gfp)
1750{
1751 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1752}
1753EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
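/*
 * Illustrative sketch (not part of this file): exposing a single
 * device-readable buffer and kicking the device.  "buf" and "len" are
 * hypothetical; the buffer pointer doubles as the token returned later by
 * virtqueue_get_buf().
 *
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */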
1754
1755/**
1756 * virtqueue_add_inbuf - expose input buffers to other end
1757 * @vq: the struct virtqueue we're talking about.
1758 * @sg: scatterlist (must be well-formed and terminated!)
1759 * @num: the number of entries in @sg writable by other side
1760 * @data: the token identifying the buffer.
1761 * @gfp: how to do memory allocations (if necessary).
1762 *
1763 * Caller must ensure we don't call this with other virtqueue operations
1764 * at the same time (except where noted).
1765 *
1766 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1767 */
1768int virtqueue_add_inbuf(struct virtqueue *vq,
1769 struct scatterlist *sg, unsigned int num,
1770 void *data,
1771 gfp_t gfp)
1772{
1773 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1774}
1775EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
1776
1777/**
1778 * virtqueue_add_inbuf_ctx - expose input buffers to other end
1779 * @vq: the struct virtqueue we're talking about.
1780 * @sg: scatterlist (must be well-formed and terminated!)
1781 * @num: the number of entries in @sg writable by other side
1782 * @data: the token identifying the buffer.
1783 * @ctx: extra context for the token
1784 * @gfp: how to do memory allocations (if necessary).
1785 *
1786 * Caller must ensure we don't call this with other virtqueue operations
1787 * at the same time (except where noted).
1788 *
1789 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1790 */
1791int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1792 struct scatterlist *sg, unsigned int num,
1793 void *data,
1794 void *ctx,
1795 gfp_t gfp)
1796{
1797 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1798}
1799EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
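/*
 * Illustrative sketch (not part of this file): posting a receive page and
 * stashing its usable length as the per-buffer context, roughly as a
 * network-style driver might.  "page" is a hypothetical struct page the
 * driver owns; the context is recovered later via virtqueue_get_buf_ctx().
 *
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, page_address(page), PAGE_SIZE);
 *	err = virtqueue_add_inbuf_ctx(vq, &sg, 1, page,
 *				      (void *)(unsigned long)PAGE_SIZE,
 *				      GFP_ATOMIC);
 */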
1800
1801/**
1802 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
1803 * @vq: the struct virtqueue
1804 *
1805 * Instead of virtqueue_kick(), you can do:
1806 * if (virtqueue_kick_prepare(vq))
1807 * virtqueue_notify(vq);
1808 *
1809 * This is sometimes useful because the virtqueue_kick_prepare() needs
1810 * to be serialized, but the actual virtqueue_notify() call does not.
1811 */
1812bool virtqueue_kick_prepare(struct virtqueue *_vq)
1813{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001814 struct vring_virtqueue *vq = to_vvq(_vq);
1815
1816 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
1817 virtqueue_kick_prepare_split(_vq);
Tiwei Biee6f633e2018-11-21 18:03:20 +08001818}
1819EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
1820
1821/**
1822 * virtqueue_notify - second half of split virtqueue_kick call.
1823 * @vq: the struct virtqueue
1824 *
1825 * This does not need to be serialized.
1826 *
1827 * Returns false if host notify failed or queue is broken, otherwise true.
1828 */
1829bool virtqueue_notify(struct virtqueue *_vq)
1830{
1831 struct vring_virtqueue *vq = to_vvq(_vq);
1832
1833 if (unlikely(vq->broken))
1834 return false;
1835
1836 /* Prod other side to tell it about changes. */
1837 if (!vq->notify(_vq)) {
1838 vq->broken = true;
1839 return false;
1840 }
1841 return true;
1842}
1843EXPORT_SYMBOL_GPL(virtqueue_notify);
1844
1845/**
1846 * virtqueue_kick - update after add_buf
1847 * @vq: the struct virtqueue
1848 *
1849 * After one or more virtqueue_add_* calls, invoke this to kick
1850 * the other side.
1851 *
1852 * Caller must ensure we don't call this with other virtqueue
1853 * operations at the same time (except where noted).
1854 *
1855 * Returns false if kick failed, otherwise true.
1856 */
1857bool virtqueue_kick(struct virtqueue *vq)
1858{
1859 if (virtqueue_kick_prepare(vq))
1860 return virtqueue_notify(vq);
1861 return true;
1862}
1863EXPORT_SYMBOL_GPL(virtqueue_kick);
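/*
 * Illustrative sketch (not part of this file): the main reason to split the
 * kick is to drop a driver lock before notifying, since the notification can
 * be expensive (e.g. a VM exit).  "priv->lock", "sg" and "buf" are
 * hypothetical driver-side names; the add/prepare pair stays serialized under
 * the lock and only the notify runs outside it.
 *
 *	unsigned long flags;
 *	bool kick;
 *	int err;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(vq);
 */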
1864
1865/**
1866 * virtqueue_get_buf_ctx - get the next used buffer
1867 * @vq: the struct virtqueue we're talking about.
1868 * @len: the length written into the buffer
 * @ctx: extra context for the token, as handed to virtqueue_add_inbuf_ctx()
 *	(may be NULL if the caller does not use per-buffer context).
1869 *
1870 * If the device wrote data into the buffer, @len will be set to the
1871 * amount written. This means you don't need to clear the buffer
1872 * beforehand to ensure there's no data leakage in the case of short
1873 * writes.
1874 *
1875 * Caller must ensure we don't call this with other virtqueue
1876 * operations at the same time (except where noted).
1877 *
1878 * Returns NULL if there are no used buffers, or the "data" token
1879 * handed to virtqueue_add_*().
1880 */
1881void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1882 void **ctx)
1883{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001884 struct vring_virtqueue *vq = to_vvq(_vq);
1885
1886 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
1887 virtqueue_get_buf_ctx_split(_vq, len, ctx);
Tiwei Biee6f633e2018-11-21 18:03:20 +08001888}
1889EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
1890
1891void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
1892{
1893 return virtqueue_get_buf_ctx(_vq, len, NULL);
1894}
1895EXPORT_SYMBOL_GPL(virtqueue_get_buf);
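/*
 * Illustrative sketch (not part of this file): a virtqueue callback usually
 * drains all completed buffers rather than fetching just one.
 * "my_complete()" is a hypothetical driver completion handler.
 *
 *	static void my_vq_callback(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *buf;
 *
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			my_complete(buf, len);
 *	}
 */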
Tiwei Biee6f633e2018-11-21 18:03:20 +08001896/**
1897 * virtqueue_disable_cb - disable callbacks
1898 * @vq: the struct virtqueue we're talking about.
1899 *
1900 * Note that this is not necessarily synchronous, hence unreliable and only
1901 * useful as an optimization.
1902 *
1903 * Unlike other operations, this need not be serialized.
1904 */
1905void virtqueue_disable_cb(struct virtqueue *_vq)
1906{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001907 struct vring_virtqueue *vq = to_vvq(_vq);
1908
1909 if (vq->packed_ring)
1910 virtqueue_disable_cb_packed(_vq);
1911 else
1912 virtqueue_disable_cb_split(_vq);
Tiwei Biee6f633e2018-11-21 18:03:20 +08001913}
1914EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
1915
1916/**
1917 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
1918 * @vq: the struct virtqueue we're talking about.
1919 *
1920 * This re-enables callbacks; it returns current queue state
1921 * in an opaque unsigned value. This value should be later tested by
1922 * virtqueue_poll, to detect a possible race between the driver checking for
1923 * more work, and enabling callbacks.
1924 *
1925 * Caller must ensure we don't call this with other virtqueue
1926 * operations at the same time (except where noted).
1927 */
1928unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
1929{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001930 struct vring_virtqueue *vq = to_vvq(_vq);
1931
1932 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
1933 virtqueue_enable_cb_prepare_split(_vq);
Tiwei Biee6f633e2018-11-21 18:03:20 +08001934}
1935EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
1936
1937/**
1938 * virtqueue_poll - query pending used buffers
1939 * @vq: the struct virtqueue we're talking about.
1940 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
1941 *
1942 * Returns "true" if there are pending used buffers in the queue.
1943 *
1944 * This does not need to be serialized.
1945 */
1946bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
1947{
1948 struct vring_virtqueue *vq = to_vvq(_vq);
1949
1950 virtio_mb(vq->weak_barriers);
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001951 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
1952 virtqueue_poll_split(_vq, last_used_idx);
Tiwei Biee6f633e2018-11-21 18:03:20 +08001953}
1954EXPORT_SYMBOL_GPL(virtqueue_poll);
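/*
 * Illustrative sketch (not part of this file): the prepare/poll pair closes
 * the race between "no more work" and re-enabling callbacks, for instance at
 * the end of NAPI-style polling.  "reschedule_processing()" is a hypothetical
 * way of getting back into the driver's processing loop.
 *
 *	unsigned opaque;
 *
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		reschedule_processing();
 *	}
 */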
1955
1956/**
1957 * virtqueue_enable_cb - restart callbacks after disable_cb.
1958 * @vq: the struct virtqueue we're talking about.
1959 *
1960 * This re-enables callbacks; it returns "false" if there are pending
1961 * buffers in the queue, to detect a possible race between the driver
1962 * checking for more work, and enabling callbacks.
1963 *
1964 * Caller must ensure we don't call this with other virtqueue
1965 * operations at the same time (except where noted).
1966 */
1967bool virtqueue_enable_cb(struct virtqueue *_vq)
1968{
1969 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
1970
1971 return !virtqueue_poll(_vq, last_used_idx);
1972}
1973EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
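/*
 * Illustrative sketch (not part of this file): a common consume loop keeps
 * draining until virtqueue_enable_cb() reports no pending buffers, so nothing
 * is missed between the last get_buf and re-enabling callbacks.  "consume()"
 * is a hypothetical handler; "buf" and "len" are declared by the caller.
 *
 *	do {
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			consume(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */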
1974
1975/**
1976 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
1977 * @vq: the struct virtqueue we're talking about.
1978 *
1979 * This re-enables callbacks but hints to the other side to delay
1980 * interrupts until most of the available buffers have been processed;
1981 * it returns "false" if there are many pending buffers in the queue,
1982 * to detect a possible race between the driver checking for more work,
1983 * and enabling callbacks.
1984 *
1985 * Caller must ensure we don't call this with other virtqueue
1986 * operations at the same time (except where noted).
1987 */
1988bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
1989{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001990 struct vring_virtqueue *vq = to_vvq(_vq);
1991
1992 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
1993 virtqueue_enable_cb_delayed_split(_vq);
Tiwei Biee6f633e2018-11-21 18:03:20 +08001994}
1995EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
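/*
 * Illustrative sketch (not part of this file): a transmit path can use the
 * delayed variant so the device only interrupts once a large batch of buffers
 * has been used.  A false return means plenty of buffers are already pending,
 * so the driver may as well reclaim them immediately;
 * "reclaim_completed_buffers()" is a hypothetical helper.
 *
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		reclaim_completed_buffers(vq);
 */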
1996
Tiwei Bie138fd252018-11-21 18:03:19 +08001997/**
1998 * virtqueue_detach_unused_buf - detach first unused buffer
1999 * @vq: the struct virtqueue we're talking about.
2000 *
2001 * Returns NULL or the "data" token handed to virtqueue_add_*().
2002 * This is not valid on an active queue; it is useful only for device
2003 * shutdown.
2004 */
2005void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2006{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002007 struct vring_virtqueue *vq = to_vvq(_vq);
2008
2009 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
2010 virtqueue_detach_unused_buf_split(_vq);
Tiwei Bie138fd252018-11-21 18:03:19 +08002011}
Michael S. Tsirkin7c5e9ed2010-04-12 16:19:07 +03002012EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
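/*
 * Illustrative sketch (not part of this file): during device removal, after
 * the device has been reset so the queue is no longer active, a driver
 * reclaims buffers that were posted but never used.  The kfree() assumes the
 * tokens were kmalloc'ed buffers, which is driver specific.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */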
Shirley Mac021eac2010-01-18 19:15:23 +05302013
Tiwei Bie138fd252018-11-21 18:03:19 +08002014static inline bool more_used(const struct vring_virtqueue *vq)
2015{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002016 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
Tiwei Bie138fd252018-11-21 18:03:19 +08002017}
2018
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002019irqreturn_t vring_interrupt(int irq, void *_vq)
2020{
2021 struct vring_virtqueue *vq = to_vvq(_vq);
2022
2023 if (!more_used(vq)) {
2024 pr_debug("virtqueue interrupt with no work for %p\n", vq);
2025 return IRQ_NONE;
2026 }
2027
2028 if (unlikely(vq->broken))
2029 return IRQ_HANDLED;
2030
2031 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
Rusty Russell18445c42008-02-04 23:49:57 -05002032 if (vq->vq.callback)
2033 vq->vq.callback(&vq->vq);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002034
2035 return IRQ_HANDLED;
2036}
Rusty Russellc6fd4702008-02-04 23:50:05 -05002037EXPORT_SYMBOL_GPL(vring_interrupt);
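/*
 * Illustrative sketch (not part of this file): a transport can hand
 * vring_interrupt() straight to request_irq() with the virtqueue as the
 * cookie when each queue has its own vector.  "irq" is a hypothetical vector
 * number obtained from the transport.
 *
 *	err = request_irq(irq, vring_interrupt, 0,
 *			  dev_name(&vdev->dev), vq);
 */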
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002038
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002039/* Only available for split ring */
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002040struct virtqueue *__vring_new_virtqueue(unsigned int index,
2041 struct vring vring,
2042 struct virtio_device *vdev,
2043 bool weak_barriers,
Michael S. Tsirkinf94682d2017-03-06 18:32:29 +02002044 bool context,
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002045 bool (*notify)(struct virtqueue *),
2046 void (*callback)(struct virtqueue *),
2047 const char *name)
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002048{
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002049 unsigned int i;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002050 struct vring_virtqueue *vq;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002051
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002052 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2053 return NULL;
2054
Tiwei Biecbeedb72018-11-21 18:03:24 +08002055 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002056 if (!vq)
2057 return NULL;
2058
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002059 vq->packed_ring = false;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002060 vq->vq.callback = callback;
2061 vq->vq.vdev = vdev;
Rusty Russell9499f5e2009-06-12 22:16:35 -06002062 vq->vq.name = name;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002063 vq->vq.num_free = vring.num;
Rusty Russell06ca2872012-10-16 23:56:14 +10302064 vq->vq.index = index;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002065 vq->we_own_ring = false;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002066 vq->notify = notify;
Rusty Russell7b21e342012-01-12 15:44:42 +10302067 vq->weak_barriers = weak_barriers;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002068 vq->broken = false;
2069 vq->last_used_idx = 0;
2070 vq->num_added = 0;
Tiwei Biefb3fba62018-11-21 18:03:26 +08002071 vq->use_dma_api = vring_use_dma_api(vdev);
Rusty Russell9499f5e2009-06-12 22:16:35 -06002072 list_add_tail(&vq->vq.list, &vdev->vqs);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002073#ifdef DEBUG
2074 vq->in_use = false;
Rusty Russelle93300b2012-01-12 15:44:43 +10302075 vq->last_add_time_valid = false;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002076#endif
2077
Michael S. Tsirkin5a08b042017-02-07 06:15:13 +02002078 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2079 !context;
Michael S. Tsirkina5c262c2011-05-20 02:10:44 +03002080 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +01002081
Tiwei Bied79dca72018-11-21 18:03:25 +08002082 vq->split.queue_dma_addr = 0;
2083 vq->split.queue_size_in_bytes = 0;
2084
Tiwei Biee593bf92018-11-21 18:03:21 +08002085 vq->split.vring = vring;
2086 vq->split.avail_flags_shadow = 0;
2087 vq->split.avail_idx_shadow = 0;
2088
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002089 /* No callback? Tell other side not to bother us. */
Venkatesh Srinivasf277ec42015-11-10 16:21:07 -08002090 if (!callback) {
Tiwei Biee593bf92018-11-21 18:03:21 +08002091 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
Ladi Prosek0ea1e4a2016-08-31 14:00:04 +02002092 if (!vq->event)
Tiwei Biee593bf92018-11-21 18:03:21 +08002093 vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2094 vq->split.avail_flags_shadow);
Venkatesh Srinivasf277ec42015-11-10 16:21:07 -08002095 }
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002096
Tiwei Biecbeedb72018-11-21 18:03:24 +08002097 vq->split.desc_state = kmalloc_array(vring.num,
2098 sizeof(struct vring_desc_state_split), GFP_KERNEL);
2099 if (!vq->split.desc_state) {
2100 kfree(vq);
2101 return NULL;
2102 }
2103
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002104 /* Put everything in free lists. */
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002105 vq->free_head = 0;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002106 for (i = 0; i < vring.num-1; i++)
Tiwei Biee593bf92018-11-21 18:03:21 +08002107 vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
Tiwei Biecbeedb72018-11-21 18:03:24 +08002108 memset(vq->split.desc_state, 0, vring.num *
2109 sizeof(struct vring_desc_state_split));
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002110
2111 return &vq->vq;
2112}
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002113EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
2114
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002115struct virtqueue *vring_create_virtqueue(
2116 unsigned int index,
2117 unsigned int num,
2118 unsigned int vring_align,
2119 struct virtio_device *vdev,
2120 bool weak_barriers,
2121 bool may_reduce_num,
Michael S. Tsirkinf94682d2017-03-06 18:32:29 +02002122 bool context,
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002123 bool (*notify)(struct virtqueue *),
2124 void (*callback)(struct virtqueue *),
2125 const char *name)
2126{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002127
2128 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2129 return vring_create_virtqueue_packed(index, num, vring_align,
2130 vdev, weak_barriers, may_reduce_num,
2131 context, notify, callback, name);
2132
Tiwei Bied79dca72018-11-21 18:03:25 +08002133 return vring_create_virtqueue_split(index, num, vring_align,
2134 vdev, weak_barriers, may_reduce_num,
2135 context, notify, callback, name);
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002136}
2137EXPORT_SYMBOL_GPL(vring_create_virtqueue);
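/*
 * Illustrative sketch (not part of this file): how a transport might create a
 * queue, letting this layer pick the split or packed layout from the
 * negotiated features.  The alignment, the notify/callback functions and the
 * name are hypothetical; the three booleans are weak_barriers, may_reduce_num
 * and context respectively.
 *
 *	vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "requestq");
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 */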
2138
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002139/* Only available for split ring */
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002140struct virtqueue *vring_new_virtqueue(unsigned int index,
2141 unsigned int num,
2142 unsigned int vring_align,
2143 struct virtio_device *vdev,
2144 bool weak_barriers,
Michael S. Tsirkinf94682d2017-03-06 18:32:29 +02002145 bool context,
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002146 void *pages,
2147 bool (*notify)(struct virtqueue *vq),
2148 void (*callback)(struct virtqueue *vq),
2149 const char *name)
2150{
2151 struct vring vring;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002152
2153 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2154 return NULL;
2155
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002156 vring_init(&vring, num, pages, vring_align);
Michael S. Tsirkinf94682d2017-03-06 18:32:29 +02002157 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002158 notify, callback, name);
2159}
Rusty Russellc6fd4702008-02-04 23:50:05 -05002160EXPORT_SYMBOL_GPL(vring_new_virtqueue);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002161
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002162void vring_del_virtqueue(struct virtqueue *_vq)
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002163{
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002164 struct vring_virtqueue *vq = to_vvq(_vq);
2165
2166 if (vq->we_own_ring) {
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002167 if (vq->packed_ring) {
2168 vring_free_queue(vq->vq.vdev,
2169 vq->packed.ring_size_in_bytes,
2170 vq->packed.vring.desc,
2171 vq->packed.ring_dma_addr);
2172
2173 vring_free_queue(vq->vq.vdev,
2174 vq->packed.event_size_in_bytes,
2175 vq->packed.vring.driver,
2176 vq->packed.driver_event_dma_addr);
2177
2178 vring_free_queue(vq->vq.vdev,
2179 vq->packed.event_size_in_bytes,
2180 vq->packed.vring.device,
2181 vq->packed.device_event_dma_addr);
2182
2183 kfree(vq->packed.desc_state);
2184 kfree(vq->packed.desc_extra);
2185 } else {
2186 vring_free_queue(vq->vq.vdev,
2187 vq->split.queue_size_in_bytes,
2188 vq->split.vring.desc,
2189 vq->split.queue_dma_addr);
2190
2191 kfree(vq->split.desc_state);
2192 }
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002193 }
2194 list_del(&_vq->list);
2195 kfree(vq);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002196}
Rusty Russellc6fd4702008-02-04 23:50:05 -05002197EXPORT_SYMBOL_GPL(vring_del_virtqueue);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002198
Rusty Russelle34f8722008-07-25 12:06:13 -05002199/* Manipulates transport-specific feature bits. */
2200void vring_transport_features(struct virtio_device *vdev)
2201{
2202 unsigned int i;
2203
2204 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2205 switch (i) {
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +01002206 case VIRTIO_RING_F_INDIRECT_DESC:
2207 break;
Michael S. Tsirkina5c262c2011-05-20 02:10:44 +03002208 case VIRTIO_RING_F_EVENT_IDX:
2209 break;
Michael S. Tsirkin747ae342014-12-01 15:52:40 +02002210 case VIRTIO_F_VERSION_1:
2211 break;
Michael S. Tsirkin1a937692016-04-18 12:58:14 +03002212 case VIRTIO_F_IOMMU_PLATFORM:
2213 break;
Rusty Russelle34f8722008-07-25 12:06:13 -05002214 default:
2215 /* We don't understand this bit. */
Michael S. Tsirkine16e12b2014-10-07 16:39:42 +02002216 __virtio_clear_bit(vdev, i);
Rusty Russelle34f8722008-07-25 12:06:13 -05002217 }
2218 }
2219}
2220EXPORT_SYMBOL_GPL(vring_transport_features);
2221
Rusty Russell5dfc1762012-01-12 15:44:42 +10302222/**
2223 * virtqueue_get_vring_size - return the size of the virtqueue's vring
2224 * @vq: the struct virtqueue containing the vring of interest.
2225 *
2226 * Returns the size of the vring. This is mainly used for boasting to
2227 * userspace. Unlike other operations, this need not be serialized.
2228 */
Rick Jones8f9f4662011-10-19 08:10:59 +00002229unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
2230{
2231
2232 struct vring_virtqueue *vq = to_vvq(_vq);
2233
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002234 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
Rick Jones8f9f4662011-10-19 08:10:59 +00002235}
2236EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
2237
Heinz Graalfsb3b32c92013-10-29 09:40:19 +10302238bool virtqueue_is_broken(struct virtqueue *_vq)
2239{
2240 struct vring_virtqueue *vq = to_vvq(_vq);
2241
2242 return vq->broken;
2243}
2244EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2245
Rusty Russelle2dcdfe2014-04-28 11:15:08 +09302246/*
2247 * This should prevent the device from being used, allowing drivers to
2248 * recover. You may need to grab appropriate locks to flush.
2249 */
2250void virtio_break_device(struct virtio_device *dev)
2251{
2252 struct virtqueue *_vq;
2253
2254 list_for_each_entry(_vq, &dev->vqs, list) {
2255 struct vring_virtqueue *vq = to_vvq(_vq);
2256 vq->broken = true;
2257 }
2258}
2259EXPORT_SYMBOL_GPL(virtio_break_device);
2260
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002261dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
Cornelia Huck89062652014-10-07 16:39:47 +02002262{
2263 struct vring_virtqueue *vq = to_vvq(_vq);
2264
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002265 BUG_ON(!vq->we_own_ring);
Cornelia Huck89062652014-10-07 16:39:47 +02002266
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002267 if (vq->packed_ring)
2268 return vq->packed.ring_dma_addr;
2269
Tiwei Bied79dca72018-11-21 18:03:25 +08002270 return vq->split.queue_dma_addr;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002271}
2272EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
2273
2274dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
Cornelia Huck89062652014-10-07 16:39:47 +02002275{
2276 struct vring_virtqueue *vq = to_vvq(_vq);
2277
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002278 BUG_ON(!vq->we_own_ring);
2279
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002280 if (vq->packed_ring)
2281 return vq->packed.driver_event_dma_addr;
2282
Tiwei Bied79dca72018-11-21 18:03:25 +08002283 return vq->split.queue_dma_addr +
Tiwei Biee593bf92018-11-21 18:03:21 +08002284 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
Cornelia Huck89062652014-10-07 16:39:47 +02002285}
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002286EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
2287
2288dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
2289{
2290 struct vring_virtqueue *vq = to_vvq(_vq);
2291
2292 BUG_ON(!vq->we_own_ring);
2293
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002294 if (vq->packed_ring)
2295 return vq->packed.device_event_dma_addr;
2296
Tiwei Bied79dca72018-11-21 18:03:25 +08002297 return vq->split.queue_dma_addr +
Tiwei Biee593bf92018-11-21 18:03:21 +08002298 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002299}
2300EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
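/*
 * Illustrative sketch (not part of this file): after creating a queue it owns,
 * a transport reads back the three DMA addresses and programs them into its
 * device-specific queue registers; the actual register writes are transport
 * specific and omitted here.
 *
 *	dma_addr_t desc  = virtqueue_get_desc_addr(vq);
 *	dma_addr_t avail = virtqueue_get_avail_addr(vq);
 *	dma_addr_t used  = virtqueue_get_used_addr(vq);
 */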
2301
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002302/* Only available for split ring */
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002303const struct vring *virtqueue_get_vring(struct virtqueue *vq)
2304{
Tiwei Biee593bf92018-11-21 18:03:21 +08002305 return &to_vvq(vq)->split.vring;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002306}
2307EXPORT_SYMBOL_GPL(virtqueue_get_vring);
Cornelia Huck89062652014-10-07 16:39:47 +02002308
Rusty Russellc6fd4702008-02-04 23:50:05 -05002309MODULE_LICENSE("GPL");