Oleksandr Andrushchenko | ae4c51a | 2018-07-20 12:01:44 +0300 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | |
| 3 | /* |
| 4 | * Xen memory reservation utilities. |
| 5 | * |
| 6 | * Copyright (c) 2003, B Dragovic |
| 7 | * Copyright (c) 2003-2004, M Williamson, K Fraser |
| 8 | * Copyright (c) 2005 Dan M. Smith, IBM Corporation |
| 9 | * Copyright (c) 2010 Daniel Kiper |
| 10 | * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. |
| 11 | */ |
| 12 | |
| 13 | #ifndef _XENMEM_RESERVATION_H |
| 14 | #define _XENMEM_RESERVATION_H |
| 15 | |
| 16 | #include <linux/highmem.h> |
| 17 | |
| 18 | #include <xen/page.h> |
| 19 | |
/*
 * Scrub (zero-fill) a page before it is handed back to the hypervisor,
 * so that its previous contents cannot leak to another domain.
 * Compiles to a no-op when CONFIG_XEN_SCRUB_PAGES is not set.
 */
static inline void xenmem_reservation_scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}
| 26 | |
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
 * PV-MMU-only worker: update the kernel mappings of @count pages to refer
 * to the machine frames in @frames (presumably after the hypervisor has
 * populated them — confirm against the definition in mem-reservation.c).
 * Callers should normally use xenmem_reservation_va_mapping_update(),
 * which additionally skips auto-translated guests.
 */
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames);

/*
 * PV-MMU-only worker: tear down the kernel mappings of @count pages,
 * typically before the underlying frames are returned to the hypervisor.
 * Callers should normally use xenmem_reservation_va_mapping_reset().
 */
void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages);
#endif
| 35 | |
/*
 * Update kernel virtual-address mappings for @count pages using the
 * machine frames in @frames.
 *
 * This is a no-op for auto-translated guests (XENFEAT_auto_translated_physmap
 * set), where the hypervisor maintains the physical-to-machine translation,
 * and when the kernel is built without PV-MMU support.
 *
 * NOTE(review): relies on xen_feature()/XENFEAT_* being pulled in
 * transitively (e.g. via <xen/page.h>) rather than including
 * <xen/features.h> directly — verify.
 */
static inline void xenmem_reservation_va_mapping_update(unsigned long count,
							struct page **pages,
							xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}
| 45 | |
/*
 * Reset (tear down) kernel virtual-address mappings for @count pages,
 * the counterpart of xenmem_reservation_va_mapping_update().
 *
 * This is a no-op for auto-translated guests (XENFEAT_auto_translated_physmap
 * set) and when the kernel is built without PV-MMU support.
 */
static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
						       struct page **pages)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_reset(count, pages);
#endif
}
| 54 | |
/*
 * Ask the hypervisor to increase this domain's memory reservation by
 * @count frames, recording the resulting machine frames in @frames.
 * Presumably returns the number of frames actually populated (which may
 * be fewer than @count) or a negative error — confirm against the
 * XENMEM_populate_physmap call in mem-reservation.c.
 */
int xenmem_reservation_increase(int count, xen_pfn_t *frames);

/*
 * Ask the hypervisor to decrease this domain's memory reservation by
 * @count frames listed in @frames.  Presumably returns the number of
 * frames released or a negative error — confirm against the
 * XENMEM_decrease_reservation call in mem-reservation.c.
 */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames);
| 58 | |
| 59 | #endif |