// SPDX-License-Identifier: GPL-2.0-only

#ifndef __KVM_MM_H__
#define __KVM_MM_H__ 1

/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock.  These macros, for use in common code
 * only, avoid using #ifdefs in places that must deal with
 * multiple architectures.
 */

#ifdef KVM_HAVE_MMU_RWLOCK
#define KVM_MMU_LOCK_INIT(kvm)		rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		write_unlock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_LOCK(kvm)		read_lock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_UNLOCK(kvm)	read_unlock(&(kvm)->mmu_lock)
#else
#define KVM_MMU_LOCK_INIT(kvm)		spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		spin_unlock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_UNLOCK(kvm)	spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */
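
/*
 * Example (an illustrative sketch, not part of the original header;
 * example_mmu_update() and kvm_do_mmu_update() are hypothetical):
 * common code takes mmu_lock only through these wrappers, so the same
 * source builds whether the architecture selected a rwlock or a
 * spinlock:
 *
 *	static void example_mmu_update(struct kvm *kvm)
 *	{
 *		KVM_MMU_LOCK(kvm);
 *		kvm_do_mmu_update(kvm);
 *		KVM_MMU_UNLOCK(kvm);
 *	}
 */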
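
/*
 * hva_to_pfn() resolves a host virtual address to a host pfn. A
 * best-effort summary of the parameters: @atomic requests a
 * non-sleeping lookup, @async (when non-NULL) allows the fault to be
 * completed asynchronously, @write_fault asks for a writable mapping,
 * and @writable (when non-NULL) is set to whether the resulting pfn is
 * host-writable.
 */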
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
		     bool write_fault, bool *writable);

#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
				       unsigned long start,
				       unsigned long end,
				       bool may_block);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end,
						     bool may_block)
{
}
#endif /* CONFIG_HAVE_KVM_PFNCACHE */
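
/*
 * The empty stub above keeps callers free of #ifdefs: code such as
 * KVM's MMU notifier invalidation path can call
 * gfn_to_pfn_cache_invalidate_start() unconditionally, and the call
 * compiles away when CONFIG_HAVE_KVM_PFNCACHE is not selected. A
 * minimal sketch of such a caller (illustrative only; range_start and
 * range_end are hypothetical):
 *
 *	gfn_to_pfn_cache_invalidate_start(kvm, range_start, range_end, true);
 */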

#endif /* __KVM_MM_H__ */