mm: hugetlb: soft-offline: dissolve source hugepage after successful migration
Currently, a hugepage migrated by soft-offline (i.e. due to correctable
memory errors) is contained as a whole hugepage, which means that the
many non-error pages in it are unreusable, i.e. wasted.
This patch solves this issue by dissolving source hugepages into buddy.
As done in the previous patch, PageHWPoison is set only on the head page
of the error hugepage. Then, when dissolving, we move the PageHWPoison
flag to the raw error page so that all healthy subpages return to buddy.
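
For illustration, the flag hand-off at dissolve time looks roughly like
the following. This is a simplified sketch (the helper name is mine, and
locking plus free-list bookkeeping are omitted), not the verbatim
dissolve_free_huge_page() code:

	/*
	 * Sketch: move HWPoison from the head page to the raw error
	 * page while dissolving, so that every other subpage of the
	 * hugepage can go back to buddy.
	 */
	static void move_hwpoison_to_raw_page(struct page *head,
					      struct page *page)
	{
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
	}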
[arnd@arndb.de: fix warnings: replace some macros with inline functions]
Link: http://lkml.kernel.org/r/20170609102544.2947326-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/1496305019-5493-5-git-send-email-n-horiguchi@ah.jp.nec.com
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
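
The macro-to-inline conversion in the hunk below matters for
warning-free builds with CONFIG_HUGETLB_PAGE=n: a 0-macro silently
drops its arguments, so a caller like the hypothetical one below
(illustrative only, not part of this patch) can trip
-Wunused-but-set-variable, while the static inline stub evaluates and
type-checks its arguments:

	int offline_range(struct page *page)
	{
		unsigned long pfn = page_to_pfn(page);

		/*
		 * With '#define dissolve_free_huge_pages(s, e) 0',
		 * pfn is never referenced and gcc warns; with the
		 * inline stub it is a used, type-checked argument.
		 */
		return dissolve_free_huge_pages(pfn, pfn + 1);
	}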
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 46bfb70..668ab17 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -472,6 +472,7 @@ static inline pgoff_t basepage_index(struct page *page)
return __basepage_index(page);
}
+extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
@@ -550,15 +551,37 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
{
return 1;
}
-#define hstate_index_to_shift(index) 0
-#define hstate_index(h) 0
+
+static inline unsigned hstate_index_to_shift(unsigned index)
+{
+ return 0;
+}
+
+static inline int hstate_index(struct hstate *h)
+{
+ return 0;
+}
static inline pgoff_t basepage_index(struct page *page)
{
return page->index;
}
-#define dissolve_free_huge_pages(s, e) 0
-#define hugepage_migration_supported(h) false
+
+static inline int dissolve_free_huge_page(struct page *page)
+{
+ return 0;
+}
+
+static inline int dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ return 0;
+}
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+ return false;
+}
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)