KVM: Fix gfn_to_page() acquiring mmap_sem twice
KVM's nopage handler calls gfn_to_page(), which takes current->mm->mmap_sem
around its call to get_user_pages(). However, ->nopage handlers are invoked
by the fault path with mmap_sem already held for read, so the semaphore was
being acquired twice. Introduce __gfn_to_page(), which requires the caller to
hold mmap_sem, and use it from the nopage handler; gfn_to_page() becomes a
wrapper that takes and releases the lock around it.
This was noticed by tglx.
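To make the double acquisition concrete, the old fault path looked roughly
like this (a simplified sketch, not the verbatim code: the kvm_vm_nopage()
name, the private_data lookup and the omitted error handling are assumptions
based on kvm_main.c of this era):

    /*
     * ->nopage is entered by the fault handler with
     * current->mm->mmap_sem already held for read.
     */
    static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
    {
            struct kvm *kvm = vma->vm_file->private_data;
            unsigned long pgoff;

            pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
            if (!kvm_is_visible_gfn(kvm, pgoff))
                    return NOPAGE_SIGBUS;
            /*
             * gfn_to_page() then did down_read(&current->mm->mmap_sem) a
             * second time around its get_user_pages() call.
             */
            return gfn_to_page(kvm, pgoff);
    }

Nesting down_read() on the same rwsem is not safe: if a writer queues up
between the two acquisitions, the second down_read() blocks behind it and the
task deadlocks against itself. Splitting the function into gfn_to_page()
(takes mmap_sem) and __gfn_to_page() (expects the caller to hold it) removes
the nested acquisition.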
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index ac5ed00..f439e45 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -633,7 +633,10 @@
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+/*
+ * Requires current->mm->mmap_sem to be held
+ */
+static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;
struct page *page[1];
@@ -648,12 +651,10 @@
return bad_page;
}
- down_read(&current->mm->mmap_sem);
npages = get_user_pages(current, current->mm,
slot->userspace_addr
+ (gfn - slot->base_gfn) * PAGE_SIZE, 1,
1, 1, page, NULL);
- up_read(&current->mm->mmap_sem);
if (npages != 1) {
get_page(bad_page);
return bad_page;
@@ -661,6 +662,18 @@
return page[0];
}
+
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+ struct page *page;
+
+ down_read(&current->mm->mmap_sem);
+ page = __gfn_to_page(kvm, gfn);
+ up_read(&current->mm->mmap_sem);
+
+ return page;
+}
+
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page(struct page *page)
@@ -2621,7 +2634,8 @@
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
if (!kvm_is_visible_gfn(kvm, pgoff))
return NOPAGE_SIGBUS;
- page = gfn_to_page(kvm, pgoff);
+ /* current->mm->mmap_sem is already held, so call the variant that expects it */
+ page = __gfn_to_page(kvm, pgoff);
if (is_error_page(page)) {
kvm_release_page(page);
return NOPAGE_SIGBUS;