s390/kvm: set guest page states to stable on re-ipl

The guest page state needs to be reset to stable for all pages on an
initial program load via diagnose 0x308 (subcode 3 or 4). Add a
page_table_reset_pgste() helper that walks the page tables of the given
mm and clears the usage state in every PGSTE, and call it from the
diagnose 0x308 handling in kvm.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
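
The guest page state lives in the PGSTE of the pte that maps the page;
clearing the bits in _PGSTE_GPS_USAGE_MASK returns a page to the stable
state. A minimal sketch of the intended call site, assuming the guest
memory is backed by the user address space of the calling process
(hence current->mm and the full 0..TASK_SIZE range):

	/* diag 0x308 subcode 3/4: make every guest page stable again */
	page_table_reset_pgste(current->mm, 0, TASK_SIZE);
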
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index e1408dd..14d43c7 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,6 +22,7 @@
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
 
+void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq);
 
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 8216c0e..6f9cfa5 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -13,6 +13,7 @@
 
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <asm/pgalloc.h>
 #include <asm/virtio-ccw.h>
 #include "kvm-s390.h"
 #include "trace.h"
@@ -86,9 +87,11 @@
 	switch (subcode) {
 	case 3:
 		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
+		page_table_reset_pgste(current->mm, 0, TASK_SIZE);
 		break;
 	case 4:
 		vcpu->run->s390_reset_flags = 0;
+		page_table_reset_pgste(current->mm, 0, TASK_SIZE);
 		break;
 	default:
 		return -EOPNOTSUPP;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 9e2b470..9c26b7a 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -879,6 +879,82 @@
 	__free_page(page);
 }
 
+static inline unsigned long page_table_reset_pte(struct mm_struct *mm,
+			pmd_t *pmd, unsigned long addr, unsigned long end)
+{
+	pte_t *start_pte, *pte;
+	spinlock_t *ptl;
+	pgste_t pgste;
+
+	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	pte = start_pte;
+	do {
+		pgste = pgste_get_lock(pte);
+		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+		pgste_set_unlock(pte, pgste);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(start_pte, ptl);
+
+	return addr;
+}
+
+static inline unsigned long page_table_reset_pmd(struct mm_struct *mm,
+			pud_t *pud, unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+	pmd_t *pmd;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		next = page_table_reset_pte(mm, pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+
+	return addr;
+}
+
+static inline unsigned long page_table_reset_pud(struct mm_struct *mm,
+			pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+	pud_t *pud;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		next = page_table_reset_pmd(mm, pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+
+	return addr;
+}
+
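+/*
+ * Reset the guest page state of all pages in the range [start, end) of
+ * the given mm to stable by clearing the usage state in every PGSTE.
+ */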
+void page_table_reset_pgste(struct mm_struct *mm,
+			unsigned long start, unsigned long end)
+{
+	unsigned long addr, next;
+	pgd_t *pgd;
+
+	addr = start;
+	down_read(&mm->mmap_sem);
+	pgd = pgd_offset(mm, addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		next = page_table_reset_pud(mm, pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+	up_read(&mm->mmap_sem);
+}
+EXPORT_SYMBOL(page_table_reset_pgste);
+
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq)
 {