/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>

#include <asm/kvm_host.h>

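/*
 * Tracking metadata for a single shadow page table page.  The hardware page
 * table itself is the page pointed at by @spt; this struct is found from
 * that page via page_private(), see to_shadow_page() below.
 */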
struct kvm_mmu_page {
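        /*
         * List membership: 'link' threads the page onto
         * kvm->arch.active_mmu_pages (or a local invalid_list while it is
         * being zapped), 'hash_link' threads it into the gfn/role hash
         * (kvm->arch.mmu_page_hash), and 'lpage_disallowed_link' tracks
         * pages with lpage_disallowed set for the NX huge page mitigation.
         */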
        struct list_head link;
        struct hlist_node hash_link;
        struct list_head lpage_disallowed_link;

        bool unsync;            /* sptes may lag the guest PTEs, synced on demand */
        u8 mmu_valid_gen;       /* Obsolete when != kvm->arch.mmu_valid_gen */
        bool mmio_cached;       /* Page contains one or more MMIO sptes */
        bool lpage_disallowed;  /* Can't be replaced by an equiv large page */

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        union kvm_mmu_page_role role;
        gfn_t gfn;

        u64 *spt;               /* The page table page itself: 512 sptes */
        /* Holds the gfn of each spte inside spt */
        gfn_t *gfns;
        int root_count;         /* Currently serving as active root */
        unsigned int unsync_children;   /* Number of unsync'd child pages */
        struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
        DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
        /*
         * Used out of the mmu-lock to avoid reading spte values while an
         * update is in progress; see the comments in __get_spte_lockless().
         */
        int clear_spte_count;
#endif

        /* Number of writes since the last time traversal visited this page. */
        atomic_t write_flooding_count;
};

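/*
 * KVM stashes a back-pointer to the owning kvm_mmu_page in page_private() of
 * the struct page backing a shadow page table when the page is allocated;
 * roughly (a sketch of the allocation-side pairing, see kvm_mmu_alloc_page()):
 *
 *      set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 *
 * to_shadow_page() simply walks that mapping backwards from the shadow page
 * table's physical address.
 */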
static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

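/*
 * Any spte lives inside some shadow page's spt page, so rounding the spte
 * pointer's physical address down to its page is enough to recover the
 * owning kvm_mmu_page.  Illustrative use (hypothetical caller):
 *
 *      struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 *      gfn_t gfn = sp->gfn;
 */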
static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
        return to_shadow_page(__pa(sptep));
}

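/*
 * Memslot-level helpers, implemented in mmu.c: the first two bump/drop the
 * disallow_lpage counts in @slot's lpage_info for @gfn, and
 * kvm_mmu_slot_gfn_write_protect() returns true if any sptes for @gfn were
 * actually write-protected, i.e. if the caller needs to flush TLBs.
 */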
void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);

#endif /* __KVM_X86_MMU_INTERNAL_H */