/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

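/*
 * Worked example (not in the original source): XEN_PFN_PER_PAGE is
 * PAGE_SIZE / XEN_PAGE_SIZE.  On x86 both are 4 KiB, so XENBUS_PAGES(8) == 8.
 * On arm64 with 64 KiB kernel pages, XEN_PFN_PER_PAGE == 16 and
 * XENBUS_PAGES(8) == 1: eight 4 KiB grants fit in one kernel page.
 */
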
struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

struct map_ring_valloc {
	struct xenbus_map_node *node;

	/* Why do we need two arrays? See the comment above __xenbus_map_ring(). */
	union {
		unsigned long addrs[XENBUS_MAX_RING_GRANTS];
		pte_t *ptes[XENBUS_MAX_RING_GRANTS];
	};
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

	unsigned int idx;	/* HVM only. */
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown       ] = "Unknown",
		[ XenbusStateInitialising  ] = "Initialising",
		[ XenbusStateInitWait      ] = "InitWait",
		[ XenbusStateInitialised   ] = "Initialised",
		[ XenbusStateConnected     ] = "Connected",
		[ XenbusStateClosing       ] = "Closing",
		[ XenbusStateClosed        ] = "Closed",
		[ XenbusStateReconfiguring ] = "Reconfiguring",
		[ XenbusStateReconfigured  ] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
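
/*
 * Illustrative sketch, not part of the original file: registering a watch
 * on the other end's xenstore directory.  The example_* names are
 * hypothetical; only xenbus_watch_path() itself is from this file.
 */
static void __maybe_unused example_otherend_changed(struct xenbus_watch *watch,
						    const char *path,
						    const char *token)
{
	pr_info("xenstore node %s changed\n", path);
}

static int __maybe_unused example_register_watch(struct xenbus_device *dev,
						 struct xenbus_watch *watch)
{
	/* dev->otherend outlives the watch, so it is safe to use as @path. */
	return xenbus_watch_path(dev, dev->otherend, watch,
				 example_otherend_changed);
}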

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path constructed from @pathfmt and its arguments,
 * using the given xenbus_watch structure for storage, and the given @callback
 * function as the callback.  Return 0 on success, or -errno on error.  On
 * success, the watched path will be saved as @watch->node, and becomes the
 * caller's to kfree().  On error, watch->node will be NULL, so the caller has
 * nothing to free, the device will switch to %XenbusStateClosing, and the
 * error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it attempted in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
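
/*
 * Illustrative sketch, not part of the original file: a frontend typically
 * advertises completed setup by switching to Initialised, then to Connected
 * once it sees the backend come up.  The helper name is hypothetical.
 */
static void __maybe_unused example_announce_ready(struct xenbus_device *dev)
{
	int err = xenbus_switch_state(dev, XenbusStateInitialised);

	if (err)
		dev_warn(&dev->dev, "failed to switch state: %d\n", err);
}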

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path_buffer)
		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
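
/*
 * Illustrative sketch, not part of the original file: xenbus_dev_error()
 * suits recoverable conditions, xenbus_dev_fatal() those that require a
 * close-down.  The "feature-foo" key and error values are hypothetical.
 */
static void __maybe_unused example_report_errors(struct xenbus_device *dev)
{
	xenbus_dev_error(dev, -EAGAIN, "backend %s not ready", dev->otherend);
	xenbus_dev_fatal(dev, -ENODEV, "%s lacks feature-foo", dev->otherend);
}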

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted (in XEN_PAGE_SIZE units)
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	int i, j;

	for (i = 0; i < nr_pages; i++) {
		err = gnttab_grant_foreign_access(dev->otherend_id,
						  virt_to_gfn(vaddr), 0);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "granting access to ring page");
			goto fail;
		}
		grefs[i] = err;

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;

fail:
	for (j = 0; j < i; j++)
		gnttab_end_foreign_access_ref(grefs[j], 0);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
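
/*
 * Illustrative sketch, not part of the original file: allocating one
 * XEN_PAGE_SIZE page of ring memory and granting it to the peer.  The
 * helper name is hypothetical; error reporting into the store is already
 * done by xenbus_grant_ring() itself.
 */
static int __maybe_unused example_setup_ring(struct xenbus_device *dev,
					     grant_ref_t *gref, void **ring)
{
	void *page = (void *)get_zeroed_page(GFP_KERNEL);
	int err;

	if (!page)
		return -ENOMEM;

	err = xenbus_grant_ring(dev, page, 1, gref);	/* one grant filled in */
	if (err) {
		free_page((unsigned long)page);
		return err;
	}

	*ring = page;
	return 0;
}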

/**
 * xenbus_alloc_evtchn
 * @dev: xenbus device
 * @port: event channel port to be filled in
 *
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
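
/*
 * Illustrative sketch, not part of the original file: allocating an unbound
 * event channel and publishing its port for the peer.  The "event-channel"
 * key follows common xenbus protocols but is the caller's choice.
 */
static int __maybe_unused example_setup_evtchn(struct xenbus_device *dev)
{
	evtchn_port_t port;
	int err;

	err = xenbus_alloc_evtchn(dev, &port);
	if (err)
		return err;

	err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel", "%u",
			    port);
	if (err)
		xenbus_free_evtchn(dev, port);

	return err;
}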

/**
 * xenbus_free_evtchn
 * @dev: xenbus device
 * @port: event channel port to close
 *
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %u", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
 * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
 * error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	int err;
	struct map_ring_valloc *info;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node) {
		err = -ENOMEM;
		goto out;
	}

	err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	/* Some hypervisors are buggy and can return 1. */
	if (err > 0)
		err = GNTST_general_error;

 out:
	kfree(info->node);
	kfree(info);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
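
/*
 * Illustrative sketch, not part of the original file: a backend mapping a
 * single-grant ring published by its frontend.  The "ring-ref" key is
 * hypothetical; pair the mapping with xenbus_unmap_ring_vfree() on teardown.
 */
static int __maybe_unused example_map_frontend_ring(struct xenbus_device *dev,
						    void **vaddr)
{
	grant_ref_t gref;
	int err;

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &gref);
	if (err != 1)
		return err < 0 ? err : -EINVAL;

	return xenbus_map_ring_valloc(dev, &gref, 1, vaddr);
}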

/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64.  The caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     struct map_ring_valloc *info,
			     unsigned int flags,
			     bool *leaked)
{
	int i, j;
	int err = GNTST_okay;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(info->map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (info->map[i].status != GNTST_okay) {
			err = info->map[i].status;
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = info->map[i].handle;
	}

	return GNTST_okay;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			gnttab_set_unmap_op(&info->unmap[j],
					    info->phys_addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (info->unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return err;
}
559
Juergen Grossb28089a2020-03-09 16:54:41 +0100560/**
561 * xenbus_unmap_ring
562 * @dev: xenbus device
563 * @handles: grant handle array
564 * @nr_handles: number of handles in the array
565 * @vaddrs: addresses to unmap
566 *
567 * Unmap memory in this domain that was imported from another domain.
568 * Returns 0 on success and returns GNTST_* on error
569 * (see xen/include/interface/grant_table.h).
570 */
571static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
572 unsigned int nr_handles, unsigned long *vaddrs)
573{
574 struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
575 int i;
576 int err;
577
578 if (nr_handles > XENBUS_MAX_RING_GRANTS)
579 return -EINVAL;
580
581 for (i = 0; i < nr_handles; i++)
582 gnttab_set_unmap_op(&unmap[i], vaddrs[i],
583 GNTMAP_host_map, handles[i]);
584
585 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
586 BUG();
587
588 err = GNTST_okay;
589 for (i = 0; i < nr_handles; i++) {
590 if (unmap[i].status != GNTST_okay) {
591 xenbus_dev_error(dev, unmap[i].status,
592 "unmapping page at handle %d error %d",
593 handles[i], unmap[i].status);
594 err = unmap[i].status;
595 break;
596 }
597 }
598
599 return err;
600}

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_hvm(struct xenbus_device *dev,
			       struct map_ring_valloc *info,
			       grant_ref_t *gnt_ref,
			       unsigned int nr_grefs,
			       void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	int err;
	void *addr;
	bool leaked = false;
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	info->node = NULL;

	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		free_xenballooned_pages(nr_pages, node->hvm.pages);
 out_err:
	return err;
}

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap memory in this domain that was imported from another domain,
 * and free the virtual address space it was mapped into.  Use this for
 * memory mapped with xenbus_map_ring_valloc().
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

#ifdef CONFIG_XEN_PV
static int xenbus_map_ring_pv(struct xenbus_device *dev,
			      struct map_ring_valloc *info,
			      grant_ref_t *gnt_refs,
			      unsigned int nr_grefs,
			      void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	struct vm_struct *area;
	int err = GNTST_okay;
	int i;
	bool leaked;

	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
	if (!area)
		return -ENOMEM;	/* node is freed by the caller; freeing it
				   here as well would be a double free */

	for (i = 0; i < nr_grefs; i++)
		info->phys_addrs[i] =
			arbitrary_virt_to_machine(info->ptes[i]).maddr;

	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				info, GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	info->node = NULL;

	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	return err;
}

static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_pv,
	.unmap = xenbus_unmap_ring_pv,
};
#endif

struct unmap_ring_hvm {
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		free_xenballooned_pages(nr_pages, node->hvm.pages);
	} else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);

	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
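
/*
 * Illustrative sketch, not part of the original file: polling the peer's
 * state, e.g. while waiting for a backend to appear.  The helper name is
 * hypothetical.
 */
static bool __maybe_unused example_otherend_connected(struct xenbus_device *dev)
{
	enum xenbus_state state = xenbus_read_driver_state(dev->otherend);

	pr_debug("%s is in state %s\n", dev->otherend, xenbus_strstate(state));
	return state == XenbusStateConnected;
}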

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_hvm,
	.unmap = xenbus_unmap_ring_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}