// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module or command line
 * parameters. One device will be created for each parameter. Syntax:
 *
 *		virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *		<socket>	:= vhost-user socket path to connect
 *		<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *		<platform_id>	:= (optional) platform device id
 *
 * example:
 *		virtio_uml.device=/var/uml.socket:1
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <linux/virtio-uml.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"

#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)

struct virtio_uml_platform_data {
	u32 virtio_device_id;
	const char *socket_path;
	struct work_struct conn_broken_wk;
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;
	struct virtio_uml_platform_data *pdata;

	spinlock_t sock_lock;
	int sock, req_fd, irq;
	u64 features;
	u64 protocol_features;
	u8 status;
	u8 registered:1;
	u8 suspended:1;
	u8 no_vq_suspend:1;

	u8 config_changed_irq:1;
	uint64_t vq_irq_vq_map;
};

struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
	bool suspended;
};

extern unsigned long long physmem_size, highmem;

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)

/* Vhost-user protocol */

static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;
	return 0;
}

static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	if (!len)
		return 0;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}

static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}

static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	if (rc == -ECONNRESET && vu_dev->registered) {
		struct virtio_uml_platform_data *pdata;

		pdata = vu_dev->pdata;

		virtio_break_device(&vu_dev->vdev);
		schedule_work(&pdata->conn_broken_wk);
	}
	if (rc)
		return rc;
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}

static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc)
		return rc;

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}

static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;
	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;
	*value = msg.payload.integer;
	return 0;
}

static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
			VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}

static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}

static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
				      bool need_response, u32 request)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
}

static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
					 u32 request, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
			       u32 request, u64 value)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = value,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}

static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
				   u64 *features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, features);
}

static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
				   u64 features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
					     u64 *protocol_features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
			VHOST_USER_GET_PROTOCOL_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, protocol_features);
}

static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					     u64 protocol_features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
				   protocol_features);
}

static void vhost_user_reply(struct virtio_uml_device *vu_dev,
			     struct vhost_user_msg *msg, int response)
{
	struct vhost_user_msg reply = {
		.payload.integer = response,
	};
	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
	int rc;

	reply.header = msg->header;
	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
	reply.header.flags |= VHOST_USER_FLAG_REPLY;
	reply.header.size = sizeof(reply.payload.integer);

	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);

	if (rc)
		vu_err(vu_dev,
		       "sending reply to slave request failed: %d (size %zu)\n",
		       rc, size);
}

static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
				       struct time_travel_event *ev)
{
	struct virtqueue *vq;
	int response = 1;
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];
	} msg;
	int rc;

	rc = vhost_user_recv_req(vu_dev, &msg.msg,
				 sizeof(msg.msg.payload) +
				 sizeof(msg.extra_payload));

	if (rc)
		return IRQ_NONE;

	switch (msg.msg.header.request) {
	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
		vu_dev->config_changed_irq = true;
		response = 0;
		break;
	case VHOST_USER_SLAVE_VRING_CALL:
		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vq->index == msg.msg.payload.vring_state.index) {
				response = 0;
				vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
				break;
			}
		}
		break;
	case VHOST_USER_SLAVE_IOTLB_MSG:
		/* not supported - VIRTIO_F_ACCESS_PLATFORM */
	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
	default:
		vu_err(vu_dev, "unexpected slave request %d\n",
		       msg.msg.header.request);
	}

	if (ev && !vu_dev->suspended)
		time_travel_add_irq_event(ev);

	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
		vhost_user_reply(vu_dev, &msg.msg, response);

	return IRQ_HANDLED;
}

static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	irqreturn_t ret = IRQ_HANDLED;

	if (!um_irq_timetravel_handler_used())
		ret = vu_req_read_message(vu_dev, NULL);

	if (vu_dev->vq_irq_vq_map) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
				vring_interrupt(0 /* ignored */, vq);
		}
		vu_dev->vq_irq_vq_map = 0;
	} else if (vu_dev->config_changed_irq) {
		virtio_config_changed(&vu_dev->vdev);
		vu_dev->config_changed_irq = false;
	}

	return ret;
}

static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
					  struct time_travel_event *ev)
{
	vu_req_read_message(data, ev);
}

static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int rc, req_fds[2];

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
			       vu_req_interrupt, IRQF_SHARED,
			       vu_dev->pdev->name, vu_dev,
			       vu_req_interrupt_comm_handler);
	if (rc < 0)
		goto err_close;

	vu_dev->irq = rc;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(vu_dev->irq, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);
	return rc;
}

static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
	int rc = vhost_user_set_owner(vu_dev);

	if (rc)
		return rc;
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (rc)
		return rc;

	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
				&vu_dev->protocol_features);
		if (rc)
			return rc;
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
				vu_dev->protocol_features);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
		if (rc)
			return rc;
	}

	return 0;
}

static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}
	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}

static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}

static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;
	return 0;
}

static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * makes no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file-backed memory
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
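	/*
	 * Rough illustration (not part of this driver; buffer size and names
	 * are arbitrary): a consumer of this transport should hand the device
	 * buffers from the file-backed physmem allocators rather than globals:
	 *
	 *	static u8 bad_buf[64];			// .bss, hidden from the slave
	 *	u8 *good_buf = kmalloc(64, GFP_KERNEL);	// physmem, visible to the slave
	 */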
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);

	if (rc < 0)
		return rc;
	if (highmem) {
		msg.payload.mem_regions.num++;
		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
				&fds[1], &msg.payload.mem_regions.regions[1]);
		if (rc < 0)
			return rc;
	}

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}

static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}


/* Virtio interface */

static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	if (info->suspended)
		return true;

	time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);
	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}

static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc = os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}


static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}

static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}

static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	if (rc < 0)
		goto close_both;

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	goto out;

release_irq:
	um_free_irq(vu_dev->irq, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}

static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	num = virtqueue_get_vring_size(vq);

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}

static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	/* not supported for now */
	if (WARN_ON(nvqs > 64))
		return -EINVAL;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
			goto error_setup;
		}
	}

	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		if (info->kick_fd >= 0) {
			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
						       info->kick_fd);
			if (rc)
				goto error_setup;
		}

		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
		if (rc)
			goto error_setup;
	}

	return 0;

error_setup:
	vu_del_vqs(vdev);
	return rc;
}

static u64 vu_get_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
}

static int vu_finalize_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
}

static const char *vu_bus_name(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
}

static const struct virtio_config_ops virtio_uml_config_ops = {
	.get = vu_get,
	.set = vu_set,
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.reset = vu_reset,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
};

static void virtio_uml_release_dev(struct device *d)
{
	struct virtio_device *vdev =
			container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	time_travel_propagate_time();

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(vu_dev->irq, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}

void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
				  bool no_vq_suspend)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	if (WARN_ON(vdev->config != &virtio_uml_config_ops))
		return;

	vu_dev->no_vq_suspend = no_vq_suspend;
	dev_info(&vdev->dev, "%sabled VQ suspend\n",
		 no_vq_suspend ? "dis" : "en");
}
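
/*
 * Caller sketch (hypothetical driver code, not part of this file): a virtio
 * driver running on UML that wants to keep its rings enabled across suspend
 * could call, e.g. from its probe():
 *
 *	virtio_uml_set_no_vq_suspend(vdev, true);
 */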

static void vu_of_conn_broken(struct work_struct *wk)
{
	/*
	 * We can't remove the device from the devicetree so the only thing we
	 * can do is warn.
	 */
	WARN_ON(1);
}

/* Platform device */

static struct virtio_uml_platform_data *
virtio_uml_create_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct virtio_uml_platform_data *pdata;
	int ret;

	if (!np)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
	pdata->pdev = pdev;

	ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
	if (ret)
		return ERR_PTR(ret);

	ret = of_property_read_u32(np, "virtio-device-id",
				   &pdata->virtio_device_id);
	if (ret)
		return ERR_PTR(ret);

	return pdata;
}
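
/*
 * Minimal devicetree sketch matching the properties parsed above (node name,
 * unit address and socket path are illustrative only):
 *
 *	virtio@0 {
 *		compatible = "virtio,uml";
 *		socket-path = "/var/uml.socket";
 *		virtio-device-id = <1>;
 *	};
 */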

static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	if (!pdata) {
		pdata = virtio_uml_create_pdata(pdev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->pdata = pdata;
	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

	time_travel_propagate_time();

	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	device_set_wakeup_capable(&vu_dev->vdev.dev, true);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc)
		put_device(&vu_dev->vdev.dev);
	vu_dev->registered = 1;
	return rc;

error_init:
	os_close_file(vu_dev->sock);
error_free:
	kfree(vu_dev);
	return rc;
}

static int virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
	return 0;
}

/* Command line device list */

static void vu_cmdline_release_dev(struct device *d)
{
}

static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}

static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}

static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)
		return -EINVAL;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])
		return -EINVAL;

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}

static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);
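
/*
 * Example (socket path illustrative): booting with
 *	virtio_uml.device=/var/uml.socket:1:2
 * registers platform device "virtio-uml.2", connects it to the vhost-user
 * backend listening on /var/uml.socket and exposes virtio device id 1 (net).
 */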


static void vu_unregister_cmdline_devices(void)
{
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}

/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);

static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = true;
			vhost_user_set_vring_enable(vu_dev, vq->index, false);
		}
	}

	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
		vu_dev->suspended = true;
		return 0;
	}

	return irq_set_irq_wake(vu_dev->irq, 1);
}

static int virtio_uml_resume(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = false;
			vhost_user_set_vring_enable(vu_dev, vq->index, true);
		}
	}

	vu_dev->suspended = false;

	if (!device_may_wakeup(&vu_dev->vdev.dev))
		return 0;

	return irq_set_irq_wake(vu_dev->irq, 0);
}

static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
	.suspend = virtio_uml_suspend,
	.resume = virtio_uml_resume,
};

static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);

MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");