/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

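/*
 * Global state: one pvcalls_fedata per connected frontend, linked
 * through pvcalls_fedata->list and protected by frontends_lock.
 */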
struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

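/*
 * Layout of an active socket's data ring (set up in
 * pvcalls_new_active_socket): "ref" grants an indirect page holding a
 * pvcalls_data_intf, which carries ring_order plus the grant refs of
 * the (1 << ring_order) data pages. The mapped data area is split in
 * two halves of XEN_FLEX_RING_SIZE(ring_order) bytes each:
 *
 *   bytes                    bytes + XEN_FLEX_RING_SIZE(order)
 *   |<------ data.in ------>|<------ data.out ------>|
 *
 * Per the pvcalls protocol, "in" carries data flowing towards the
 * frontend and "out" carries data coming from it.
 */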
struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

static void pvcalls_back_ioworker(struct work_struct *work)
{
}

static int pvcalls_back_socket(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EAFNOSUPPORT;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

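/*
 * Called by the network stack when the state of a mapped socket
 * changes (e.g. the peer closed the connection): report the error on
 * the shared data ring and kick the frontend's event channel.
 */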
static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_data_intf *intf;

	if (map == NULL)
		return;

	intf = map->ring;
	intf->in_error = -ENOTCONN;
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
}

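/*
 * Set up the backend side of a new active socket:
 *
 * 1. map the indirect page granted by the frontend (@ref);
 * 2. read ring_order from it and, after virt_rmb(), map the
 *    (1 << ring_order) data ring pages it describes;
 * 3. bind @evtchn to pvcalls_back_conn_event;
 * 4. redirect sk_data_ready/sk_state_change so that socket activity
 *    is forwarded to the frontend.
 *
 * Returns the new sock_mapping, or NULL on failure.
 */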
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		uint32_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;
	/* so that the error path below can safely list_del() before the
	 * entry has been list_add_tail()'ed */
	INIT_LIST_HEAD(&map->list);

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id,
						    evtchn,
						    pvcalls_back_conn_event,
						    0,
						    "pvcalls-backend",
						    map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	down(&fedata->socket_lock);
	list_del(&map->list);
	pvcalls_back_release_active(fedata->dev, fedata, map);
	up(&fedata->socket_lock);
	return NULL;
}

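/*
 * PVCALLS_CONNECT: validate the frontend-supplied address, then create
 * and connect a kernel socket. inet_stream_connect() is called with
 * flags == 0, so it blocks until the connection is established or
 * fails; the new socket is then wired up as an active socket mapping.
 */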
static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);	/* not map->sock: map is NULL here */
	}

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}


static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	return 0;
}

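/*
 * Workqueue handler for PVCALLS_ACCEPT: pvcalls_back_accept stashes a
 * copy of the request in mappass->reqcopy and queues this function,
 * which performs the potentially-sleeping inet_accept() and only then
 * pushes the response onto the command ring.
 */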
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		goto out_error;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;

	if (mappass == NULL)
		return;

	queue_work(mappass->wq, &mappass->register_work);
}

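/*
 * PVCALLS_BIND: create a passive socket, bind it to the address
 * supplied by the frontend and store the resulting sockpass_mapping
 * in the per-frontend radix tree, keyed by the frontend-chosen id.
 */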
static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

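/*
 * PVCALLS_ACCEPT: look up the passive socket by id and defer the
 * (blocking) accept to __pvcalls_back_accept via the sockpass
 * workqueue. Returning -1 tells pvcalls_back_handle_cmd not to push a
 * response yet; the worker pushes it once the accept completes. Only
 * one request at a time may use reqcopy, hence -EINTR if it is busy.
 */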
static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
		req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	return 0;
}

static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

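/*
 * Drain the command ring. RING_COPY_REQUEST snapshots each request
 * into local memory so the frontend cannot change it while it is being
 * handled. Notifications are batched: the event channel is kicked at
 * most once per pass, and only if some pushed response requires it.
 * A non-zero return from pvcalls_back_handle_cmd (see
 * pvcalls_back_accept) means the response will be pushed later.
 */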
static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}

static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;

	if (dev == NULL)
		return IRQ_HANDLED;

	fedata = dev_get_drvdata(&dev->dev);
	if (fedata == NULL)
		return IRQ_HANDLED;

	pvcalls_back_work(fedata);
	return IRQ_HANDLED;
}

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	return IRQ_HANDLED;
}

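/*
 * Connect to a new frontend. The frontend has written its half of the
 * handshake under its own xenstore directory, e.g. (illustrative
 * layout):
 *
 *   /local/domain/<frontend-id>/device/pvcalls/<devid>/port     = "<evtchn>"
 *   /local/domain/<frontend-id>/device/pvcalls/<devid>/ring-ref = "<gntref>"
 *
 * Map the command ring, bind the event channel to a threaded irq
 * handler, and add the new pvcalls_fedata to the global frontends
 * list.
 */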
static int backend_connect(struct xenbus_device *dev)
{
	int err, evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/port",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

 error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

static int backend_disconnect(struct xenbus_device *dev)
{
	return 0;
}

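/*
 * Advertise the backend's capabilities under its own nodename in a
 * single xenstore transaction, retrying on -EAGAIN. Afterwards the
 * backend directory contains (values as written below):
 *
 *   versions       = "1"            (PVCALLS_VERSIONS)
 *   max-page-order = MAX_RING_ORDER
 *   function-calls = XENBUS_FUNCTIONS_CALLS
 */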
static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}

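/*
 * Walk dev->state towards the requested state one legal transition at
 * a time, invoking backend_connect()/backend_disconnect() along the
 * way; any transition not listed below is a bug (__WARN).
 */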
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				__WARN();
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				backend_connect(dev);
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				__WARN();
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				__WARN();
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				__WARN();
			}
			break;
		default:
			__WARN();
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static int pvcalls_back_remove(struct xenbus_device *dev)
{
	return 0;
}

static int pvcalls_back_uevent(struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);