/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

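/*
 * Lazily allocate the shared huge zero page. The refcount starts at 2:
 * one reference for the caller and one that is only dropped by the
 * shrinker under memory pressure. Losing the cmpxchg race simply frees
 * the freshly allocated page and retries with the winner's copy.
 */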
static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

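/*
 * Per-mm wrappers: the first caller in an mm takes a single reference on
 * the huge zero page and records it with MMF_HUGE_ZERO_PAGE, so each mm
 * pins the page at most once no matter how many zero-page PMDs it maps.
 */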
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

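/*
 * sysfs interface: the attributes below appear under
 * /sys/kernel/mm/transparent_hugepage/ (e.g. "echo madvise > enabled"
 * selects the REQ_MADV policy parsed by enabled_store()).
 */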
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

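/*
 * Generic show/store helpers for sysfs attributes that map one-to-one
 * onto a single bit in transparent_hugepage_flags (only "0" or "1" is
 * accepted on write).
 */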
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer+madvise", buf,
		    min(sizeof("defer+madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer", buf,
		    min(sizeof("defer")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/* ->lru in the tail pages is occupied by compound_head. */
	return &page[2].deferred_list;
}

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

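/*
 * Ask for a mapping padded by @size and then shift the returned address
 * so that it is congruent with the file offset modulo @size; this lets
 * DAX faults install huge (PMD-sized) mappings. Returns 0 when no
 * suitably aligned area can be produced.
 */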
unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	unsigned long addr;
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	addr = current->mm->get_unmapped_area(filp, 0, len_pad,
					      off >> PAGE_SHIFT, flags);
	if (IS_ERR_VALUE(addr))
		return 0;

	addr += (off - addr) & (size - 1);
	return addr;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (addr)
		goto out;
	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
	if (addr)
		return addr;

out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

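/*
 * Slow path for an anonymous huge page fault: charge the page to the
 * memcg, clear it, and map it with a single huge PMD under the PMD lock.
 * Missing faults in userfaultfd-registered ranges are handed to
 * userspace instead of being populated here.
 */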
static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
		gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	pgtable = pte_alloc_one(vma->vm_mm, haddr);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			int ret;

			spin_unlock(vmf->ptl);
			mem_cgroup_cancel_charge(page, memcg, true);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		mem_cgroup_commit_charge(page, memcg, false, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	mem_cgroup_cancel_charge(page, memcg, true);
	put_page(page);
	return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     __GFP_KSWAPD_RECLAIM);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     0);
	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
	return true;
}

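/*
 * Anonymous huge page fault entry point: read faults may be satisfied
 * with the shared huge zero page; otherwise a THP is allocated with the
 * gfp mask derived from the defrag policy and installed by
 * __do_huge_pmd_anonymous_page().
 */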
int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		int ret;
		pgtable = pte_alloc_one(vma->vm_mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		set = false;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				spin_unlock(vmf->ptl);
				set = true;
			}
		} else
			spin_unlock(vmf->ptl);
		if (!set)
			pte_free(vma->vm_mm, pgtable);
		return ret;
	}
	gfp = alloc_hugepage_direct_gfpmask(vma);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}

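/*
 * Install a huge PMD for a pfn-based (typically DAX/devmap) mapping.
 * The optional deposited page table keeps pagetable accounting
 * consistent on architectures that require a deposit.
 */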
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);
	spin_unlock(ptl);
}

int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;
	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm, addr);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);

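/*
 * PUD-sized variants of the pfn insertion helpers, only built on
 * architectures that support transparent huge PUD mappings.
 */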
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);
	spin_unlock(ptl);
}

int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON(!pfn_t_devmap(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

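/*
 * Mark a huge PMD accessed (and dirty for write access) on behalf of
 * get_user_pages()-style lookups that pass FOLL_TOUCH.
 */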
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 841 | static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 842 | pmd_t *pmd, int flags) |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 843 | { |
| 844 | pmd_t _pmd; |
| 845 | |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 846 | _pmd = pmd_mkyoung(*pmd); |
| 847 | if (flags & FOLL_WRITE) |
| 848 | _pmd = pmd_mkdirty(_pmd); |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 849 | if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 850 | pmd, _pmd, flags & FOLL_WRITE)) |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 851 | update_mmu_cache_pmd(vma, addr, pmd); |
| 852 | } |
| 853 | |
| 854 | struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, |
| 855 | pmd_t *pmd, int flags) |
| 856 | { |
| 857 | unsigned long pfn = pmd_pfn(*pmd); |
| 858 | struct mm_struct *mm = vma->vm_mm; |
| 859 | struct dev_pagemap *pgmap; |
| 860 | struct page *page; |
| 861 | |
| 862 | assert_spin_locked(pmd_lockptr(mm, pmd)); |
| 863 | |
Keno Fischer | 8310d48 | 2017-01-24 15:17:48 -0800 | [diff] [blame] | 864 | /* |
| 865 | * When we COW a devmap PMD entry, we split it into PTEs, so we should |
| 866 | * not be in this function with `flags & FOLL_COW` set. |
| 867 | */ |
| 868 | WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); |
| 869 | |
Linus Torvalds | f6f3732 | 2017-12-15 18:53:22 -0800 | [diff] [blame] | 870 | if (flags & FOLL_WRITE && !pmd_write(*pmd)) |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 871 | return NULL; |
| 872 | |
| 873 | if (pmd_present(*pmd) && pmd_devmap(*pmd)) |
| 874 | /* pass */; |
| 875 | else |
| 876 | return NULL; |
| 877 | |
| 878 | if (flags & FOLL_TOUCH) |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 879 | touch_pmd(vma, addr, pmd, flags); |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 880 | |
| 881 | /* |
| 882 | * device mapped pages can only be returned if the |
| 883 | * caller will manage the page reference count. |
| 884 | */ |
| 885 | if (!(flags & FOLL_GET)) |
| 886 | return ERR_PTR(-EEXIST); |
| 887 | |
| 888 | pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; |
| 889 | pgmap = get_dev_pagemap(pfn, NULL); |
| 890 | if (!pgmap) |
| 891 | return ERR_PTR(-EFAULT); |
| 892 | page = pfn_to_page(pfn); |
| 893 | get_page(page); |
| 894 | put_dev_pagemap(pgmap); |
| 895 | |
| 896 | return page; |
| 897 | } |
| 898 | |
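/*
 * copy_huge_pmd() copies one huge pmd at fork(). Only anonymous mappings are
 * handled here; other mappings are skipped and re-filled on fault. The entry
 * is write-protected in both mms so a later write triggers COW, and the huge
 * zero page and pmd migration entries are handled as special cases.
 */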
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 899 | int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 900 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, |
| 901 | struct vm_area_struct *vma) |
| 902 | { |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 903 | spinlock_t *dst_ptl, *src_ptl; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 904 | struct page *src_page; |
| 905 | pmd_t pmd; |
Matthew Wilcox | 12c9d70 | 2016-02-02 16:57:57 -0800 | [diff] [blame] | 906 | pgtable_t pgtable = NULL; |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 907 | int ret = -ENOMEM; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 908 | |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 909 | /* Skip if the pmd can be re-filled on fault */
| 910 | if (!vma_is_anonymous(vma)) |
| 911 | return 0; |
| 912 | |
| 913 | pgtable = pte_alloc_one(dst_mm, addr); |
| 914 | if (unlikely(!pgtable)) |
| 915 | goto out; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 916 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 917 | dst_ptl = pmd_lock(dst_mm, dst_pmd); |
| 918 | src_ptl = pmd_lockptr(src_mm, src_pmd); |
| 919 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 920 | |
| 921 | ret = -EAGAIN; |
| 922 | pmd = *src_pmd; |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 923 | |
| 924 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
| 925 | if (unlikely(is_swap_pmd(pmd))) { |
| 926 | swp_entry_t entry = pmd_to_swp_entry(pmd); |
| 927 | |
| 928 | VM_BUG_ON(!is_pmd_migration_entry(pmd)); |
| 929 | if (is_write_migration_entry(entry)) { |
| 930 | make_migration_entry_read(&entry); |
| 931 | pmd = swp_entry_to_pmd(entry); |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 932 | if (pmd_swp_soft_dirty(*src_pmd)) |
| 933 | pmd = pmd_swp_mksoft_dirty(pmd); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 934 | set_pmd_at(src_mm, addr, src_pmd, pmd); |
| 935 | } |
Zi Yan | dd8a67f | 2017-11-02 15:59:47 -0700 | [diff] [blame] | 936 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
Kirill A. Shutemov | af5b0f6 | 2017-11-15 17:35:40 -0800 | [diff] [blame] | 937 | mm_inc_nr_ptes(dst_mm); |
Zi Yan | dd8a67f | 2017-11-02 15:59:47 -0700 | [diff] [blame] | 938 | pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 939 | set_pmd_at(dst_mm, addr, dst_pmd, pmd); |
| 940 | ret = 0; |
| 941 | goto out_unlock; |
| 942 | } |
| 943 | #endif |
| 944 | |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 945 | if (unlikely(!pmd_trans_huge(pmd))) { |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 946 | pte_free(dst_mm, pgtable); |
| 947 | goto out_unlock; |
| 948 | } |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 949 | /* |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 950 | * When the page table lock is held, the huge zero pmd should not be
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 951 | * under splitting, since we don't split the page itself, only the pmd
| 952 | * into a page table.
| 953 | */ |
| 954 | if (is_huge_zero_pmd(pmd)) { |
Kirill A. Shutemov | 5918d10 | 2013-04-29 15:08:44 -0700 | [diff] [blame] | 955 | struct page *zero_page; |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 956 | /* |
| 957 | * mm_get_huge_zero_page() will never allocate a new page here,
| 958 | * since we already have a zero page to copy. It just takes a |
| 959 | * reference. |
| 960 | */ |
Aaron Lu | 6fcb52a | 2016-10-07 17:00:08 -0700 | [diff] [blame] | 961 | zero_page = mm_get_huge_zero_page(dst_mm); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 962 | set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, |
Kirill A. Shutemov | 5918d10 | 2013-04-29 15:08:44 -0700 | [diff] [blame] | 963 | zero_page); |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 964 | ret = 0; |
| 965 | goto out_unlock; |
| 966 | } |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 967 | |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 968 | src_page = pmd_page(pmd); |
| 969 | VM_BUG_ON_PAGE(!PageHead(src_page), src_page); |
| 970 | get_page(src_page); |
| 971 | page_dup_rmap(src_page, true); |
| 972 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
Kirill A. Shutemov | c481290 | 2017-11-15 17:35:37 -0800 | [diff] [blame] | 973 | mm_inc_nr_ptes(dst_mm); |
Kirill A. Shutemov | 628d47c | 2016-07-26 15:25:42 -0700 | [diff] [blame] | 974 | pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 975 | |
| 976 | pmdp_set_wrprotect(src_mm, addr, src_pmd); |
| 977 | pmd = pmd_mkold(pmd_wrprotect(pmd)); |
| 978 | set_pmd_at(dst_mm, addr, dst_pmd, pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 979 | |
| 980 | ret = 0; |
| 981 | out_unlock: |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 982 | spin_unlock(src_ptl); |
| 983 | spin_unlock(dst_ptl); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 984 | out: |
| 985 | return ret; |
| 986 | } |
| 987 | |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 988 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
| 989 | static void touch_pud(struct vm_area_struct *vma, unsigned long addr, |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 990 | pud_t *pud, int flags) |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 991 | { |
| 992 | pud_t _pud; |
| 993 | |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 994 | _pud = pud_mkyoung(*pud); |
| 995 | if (flags & FOLL_WRITE) |
| 996 | _pud = pud_mkdirty(_pud); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 997 | if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 998 | pud, _pud, flags & FOLL_WRITE)) |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 999 | update_mmu_cache_pud(vma, addr, pud); |
| 1000 | } |
| 1001 | |
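/*
 * follow_devmap_pud() is the pud-sized counterpart of follow_devmap_pmd()
 * above: same locking and FOLL_GET requirements, same dev_pagemap handling.
 */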
| 1002 | struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, |
| 1003 | pud_t *pud, int flags) |
| 1004 | { |
| 1005 | unsigned long pfn = pud_pfn(*pud); |
| 1006 | struct mm_struct *mm = vma->vm_mm; |
| 1007 | struct dev_pagemap *pgmap; |
| 1008 | struct page *page; |
| 1009 | |
| 1010 | assert_spin_locked(pud_lockptr(mm, pud)); |
| 1011 | |
Linus Torvalds | f6f3732 | 2017-12-15 18:53:22 -0800 | [diff] [blame] | 1012 | if (flags & FOLL_WRITE && !pud_write(*pud)) |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1013 | return NULL; |
| 1014 | |
| 1015 | if (pud_present(*pud) && pud_devmap(*pud)) |
| 1016 | /* pass */; |
| 1017 | else |
| 1018 | return NULL; |
| 1019 | |
| 1020 | if (flags & FOLL_TOUCH) |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 1021 | touch_pud(vma, addr, pud, flags); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1022 | |
| 1023 | /* |
| 1024 | * device mapped pages can only be returned if the |
| 1025 | * caller will manage the page reference count. |
| 1026 | */ |
| 1027 | if (!(flags & FOLL_GET)) |
| 1028 | return ERR_PTR(-EEXIST); |
| 1029 | |
| 1030 | pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; |
| 1031 | pgmap = get_dev_pagemap(pfn, NULL); |
| 1032 | if (!pgmap) |
| 1033 | return ERR_PTR(-EFAULT); |
| 1034 | page = pfn_to_page(pfn); |
| 1035 | get_page(page); |
| 1036 | put_dev_pagemap(pgmap); |
| 1037 | |
| 1038 | return page; |
| 1039 | } |
| 1040 | |
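/*
 * copy_huge_pud() copies one huge pud at fork(), write-protecting the entry
 * in both mms so that a later write faults instead of sharing a writable
 * mapping.
 */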
| 1041 | int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 1042 | pud_t *dst_pud, pud_t *src_pud, unsigned long addr, |
| 1043 | struct vm_area_struct *vma) |
| 1044 | { |
| 1045 | spinlock_t *dst_ptl, *src_ptl; |
| 1046 | pud_t pud; |
| 1047 | int ret; |
| 1048 | |
| 1049 | dst_ptl = pud_lock(dst_mm, dst_pud); |
| 1050 | src_ptl = pud_lockptr(src_mm, src_pud); |
| 1051 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); |
| 1052 | |
| 1053 | ret = -EAGAIN; |
| 1054 | pud = *src_pud; |
| 1055 | if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) |
| 1056 | goto out_unlock; |
| 1057 | |
| 1058 | /* |
| 1059 | * When the page table lock is held, the huge zero pud should not be
| 1060 | * under splitting, since we don't split the page itself, only the pud
| 1061 | * into a page table.
| 1062 | */ |
| 1063 | if (is_huge_zero_pud(pud)) { |
| 1064 | /* No huge zero pud yet */ |
| 1065 | } |
| 1066 | |
| 1067 | pudp_set_wrprotect(src_mm, addr, src_pud); |
| 1068 | pud = pud_mkold(pud_wrprotect(pud)); |
| 1069 | set_pud_at(dst_mm, addr, dst_pud, pud); |
| 1070 | |
| 1071 | ret = 0; |
| 1072 | out_unlock: |
| 1073 | spin_unlock(src_ptl); |
| 1074 | spin_unlock(dst_ptl); |
| 1075 | return ret; |
| 1076 | } |
| 1077 | |
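/*
 * huge_pud_set_accessed() handles a fault on a present huge pud by marking
 * the entry young (and dirty for writes), after re-checking under the pud
 * lock that the pud has not changed.
 */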
| 1078 | void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) |
| 1079 | { |
| 1080 | pud_t entry; |
| 1081 | unsigned long haddr; |
| 1082 | bool write = vmf->flags & FAULT_FLAG_WRITE; |
| 1083 | |
| 1084 | vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); |
| 1085 | if (unlikely(!pud_same(*vmf->pud, orig_pud))) |
| 1086 | goto unlock; |
| 1087 | |
| 1088 | entry = pud_mkyoung(orig_pud); |
| 1089 | if (write) |
| 1090 | entry = pud_mkdirty(entry); |
| 1091 | haddr = vmf->address & HPAGE_PUD_MASK; |
| 1092 | if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) |
| 1093 | update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); |
| 1094 | |
| 1095 | unlock: |
| 1096 | spin_unlock(vmf->ptl); |
| 1097 | } |
| 1098 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
| 1099 | |
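/*
 * huge_pmd_set_accessed() is the pmd-level analogue of the pud helper above:
 * it refreshes the access (and dirty) bits of a present huge pmd under the
 * page table lock.
 */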
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1100 | void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd) |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1101 | { |
| 1102 | pmd_t entry; |
| 1103 | unsigned long haddr; |
Minchan Kim | 20f664a | 2017-01-10 16:57:51 -0800 | [diff] [blame] | 1104 | bool write = vmf->flags & FAULT_FLAG_WRITE; |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1105 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1106 | vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); |
| 1107 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1108 | goto unlock; |
| 1109 | |
| 1110 | entry = pmd_mkyoung(orig_pmd); |
Minchan Kim | 20f664a | 2017-01-10 16:57:51 -0800 | [diff] [blame] | 1111 | if (write) |
| 1112 | entry = pmd_mkdirty(entry); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1113 | haddr = vmf->address & HPAGE_PMD_MASK; |
Minchan Kim | 20f664a | 2017-01-10 16:57:51 -0800 | [diff] [blame] | 1114 | if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1115 | update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1116 | |
| 1117 | unlock: |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1118 | spin_unlock(vmf->ptl); |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1119 | } |
| 1120 | |
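/*
 * Fallback for write faults on an anonymous huge page when a new huge page
 * cannot be used: the content is copied into HPAGE_PMD_NR small pages and
 * the huge pmd is replaced by a page table mapping those pages.
 */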
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1121 | static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd, |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1122 | struct page *page) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1123 | { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1124 | struct vm_area_struct *vma = vmf->vma; |
| 1125 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1126 | struct mem_cgroup *memcg; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1127 | pgtable_t pgtable; |
| 1128 | pmd_t _pmd; |
| 1129 | int ret = 0, i; |
| 1130 | struct page **pages; |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1131 | unsigned long mmun_start; /* For mmu_notifiers */ |
| 1132 | unsigned long mmun_end; /* For mmu_notifiers */ |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1133 | |
Kees Cook | 6da2ec5 | 2018-06-12 13:55:00 -0700 | [diff] [blame] | 1134 | pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *), |
| 1135 | GFP_KERNEL); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1136 | if (unlikely(!pages)) { |
| 1137 | ret |= VM_FAULT_OOM; |
| 1138 | goto out; |
| 1139 | } |
| 1140 | |
| 1141 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
Michal Hocko | 41b6167 | 2017-01-10 16:57:42 -0800 | [diff] [blame] | 1142 | pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1143 | vmf->address, page_to_nid(page)); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1144 | if (unlikely(!pages[i] || |
Tejun Heo | 2cf8558 | 2018-07-03 11:14:56 -0400 | [diff] [blame] | 1145 | mem_cgroup_try_charge_delay(pages[i], vma->vm_mm, |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1146 | GFP_KERNEL, &memcg, false))) { |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1147 | if (pages[i]) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1148 | put_page(pages[i]); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1149 | while (--i >= 0) { |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1150 | memcg = (void *)page_private(pages[i]); |
| 1151 | set_page_private(pages[i], 0); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1152 | mem_cgroup_cancel_charge(pages[i], memcg, |
| 1153 | false); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1154 | put_page(pages[i]); |
| 1155 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1156 | kfree(pages); |
| 1157 | ret |= VM_FAULT_OOM; |
| 1158 | goto out; |
| 1159 | } |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1160 | set_page_private(pages[i], (unsigned long)memcg); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1161 | } |
| 1162 | |
| 1163 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
| 1164 | copy_user_highpage(pages[i], page + i, |
Hillf Danton | 0089e48 | 2011-10-31 17:09:38 -0700 | [diff] [blame] | 1165 | haddr + PAGE_SIZE * i, vma); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1166 | __SetPageUptodate(pages[i]); |
| 1167 | cond_resched(); |
| 1168 | } |
| 1169 | |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1170 | mmun_start = haddr; |
| 1171 | mmun_end = haddr + HPAGE_PMD_SIZE; |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1172 | mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1173 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1174 | vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); |
| 1175 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1176 | goto out_free_pages; |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1177 | VM_BUG_ON_PAGE(!PageHead(page), page); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1178 | |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1179 | /* |
| 1180 | * Leave the pmd empty until the ptes are filled. Note that we must notify
| 1181 | * here, as a concurrent CPU thread might write to the new page before the
| 1182 | * call to mmu_notifier_invalidate_range_end() happens, which can lead to a
| 1183 | * device seeing memory writes in a different order than the CPU.
| 1184 | * |
Mike Rapoport | ad56b73 | 2018-03-21 21:22:47 +0200 | [diff] [blame] | 1185 | * See Documentation/vm/mmu_notifier.rst |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 1186 | */ |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1187 | pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1188 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1189 | pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1190 | pmd_populate(vma->vm_mm, &_pmd, pgtable); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1191 | |
| 1192 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1193 | pte_t entry; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1194 | entry = mk_pte(pages[i], vma->vm_page_prot); |
| 1195 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1196 | memcg = (void *)page_private(pages[i]); |
| 1197 | set_page_private(pages[i], 0); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1198 | page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1199 | mem_cgroup_commit_charge(pages[i], memcg, false, false); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1200 | lru_cache_add_active_or_unevictable(pages[i], vma); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1201 | vmf->pte = pte_offset_map(&_pmd, haddr); |
| 1202 | VM_BUG_ON(!pte_none(*vmf->pte)); |
| 1203 | set_pte_at(vma->vm_mm, haddr, vmf->pte, entry); |
| 1204 | pte_unmap(vmf->pte); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1205 | } |
| 1206 | kfree(pages); |
| 1207 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1208 | smp_wmb(); /* make pte visible before pmd */ |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1209 | pmd_populate(vma->vm_mm, vmf->pmd, pgtable); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1210 | page_remove_rmap(page, true); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1211 | spin_unlock(vmf->ptl); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1212 | |
Jérôme Glisse | 4645b9f | 2017-11-15 17:34:11 -0800 | [diff] [blame] | 1213 | /* |
| 1214 | * No need to double call mmu_notifier->invalidate_range() callback as |
| 1215 | * the above pmdp_huge_clear_flush_notify() did already call it. |
| 1216 | */ |
| 1217 | mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, |
| 1218 | mmun_end); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1219 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1220 | ret |= VM_FAULT_WRITE; |
| 1221 | put_page(page); |
| 1222 | |
| 1223 | out: |
| 1224 | return ret; |
| 1225 | |
| 1226 | out_free_pages: |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1227 | spin_unlock(vmf->ptl); |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1228 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1229 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1230 | memcg = (void *)page_private(pages[i]); |
| 1231 | set_page_private(pages[i], 0); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1232 | mem_cgroup_cancel_charge(pages[i], memcg, false); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1233 | put_page(pages[i]); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1234 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1235 | kfree(pages); |
| 1236 | goto out; |
| 1237 | } |
| 1238 | |
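/*
 * Write fault on an anonymous huge pmd. The page is reused in place when it
 * is mapped only here; otherwise a fresh huge page is allocated and copied.
 * When neither is possible the pmd is split and the fault falls back to the
 * normal page path.
 */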
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1239 | int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1240 | { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1241 | struct vm_area_struct *vma = vmf->vma; |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1242 | struct page *page = NULL, *new_page; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1243 | struct mem_cgroup *memcg; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1244 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1245 | unsigned long mmun_start; /* For mmu_notifiers */ |
| 1246 | unsigned long mmun_end; /* For mmu_notifiers */ |
Michal Hocko | 3b363692 | 2015-04-15 16:13:29 -0700 | [diff] [blame] | 1247 | gfp_t huge_gfp; /* for allocation and charge */ |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1248 | int ret = 0; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1249 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1250 | vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); |
Sasha Levin | 81d1b09 | 2014-10-09 15:28:10 -0700 | [diff] [blame] | 1251 | VM_BUG_ON_VMA(!vma->anon_vma, vma); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1252 | if (is_huge_zero_pmd(orig_pmd)) |
| 1253 | goto alloc; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1254 | spin_lock(vmf->ptl); |
| 1255 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1256 | goto out_unlock; |
| 1257 | |
| 1258 | page = pmd_page(orig_pmd); |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1259 | VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); |
Kirill A. Shutemov | 1f25fe2 | 2016-01-15 16:52:24 -0800 | [diff] [blame] | 1260 | /* |
| 1261 | * We can only reuse the page if nobody else maps the huge page or any
Andrea Arcangeli | 6d0a07e | 2016-05-12 15:42:25 -0700 | [diff] [blame] | 1262 | * part of it.
Kirill A. Shutemov | 1f25fe2 | 2016-01-15 16:52:24 -0800 | [diff] [blame] | 1263 | */ |
Huang Ying | ba3c4ce | 2017-09-06 16:22:19 -0700 | [diff] [blame] | 1264 | if (!trylock_page(page)) { |
| 1265 | get_page(page); |
| 1266 | spin_unlock(vmf->ptl); |
| 1267 | lock_page(page); |
| 1268 | spin_lock(vmf->ptl); |
| 1269 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { |
| 1270 | unlock_page(page); |
| 1271 | put_page(page); |
| 1272 | goto out_unlock; |
| 1273 | } |
| 1274 | put_page(page); |
| 1275 | } |
| 1276 | if (reuse_swap_page(page, NULL)) { |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1277 | pmd_t entry; |
| 1278 | entry = pmd_mkyoung(orig_pmd); |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1279 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1280 | if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) |
| 1281 | update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1282 | ret |= VM_FAULT_WRITE; |
Huang Ying | ba3c4ce | 2017-09-06 16:22:19 -0700 | [diff] [blame] | 1283 | unlock_page(page); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1284 | goto out_unlock; |
| 1285 | } |
Huang Ying | ba3c4ce | 2017-09-06 16:22:19 -0700 | [diff] [blame] | 1286 | unlock_page(page); |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1287 | get_page(page); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1288 | spin_unlock(vmf->ptl); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1289 | alloc: |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1290 | if (transparent_hugepage_enabled(vma) && |
Aneesh Kumar K.V | 077fcf1 | 2015-02-11 15:27:12 -0800 | [diff] [blame] | 1291 | !transparent_hugepage_debug_cow()) { |
Mel Gorman | 444eb2a4 | 2016-03-17 14:19:23 -0700 | [diff] [blame] | 1292 | huge_gfp = alloc_hugepage_direct_gfpmask(vma); |
Michal Hocko | 3b363692 | 2015-04-15 16:13:29 -0700 | [diff] [blame] | 1293 | new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); |
Aneesh Kumar K.V | 077fcf1 | 2015-02-11 15:27:12 -0800 | [diff] [blame] | 1294 | } else |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1295 | new_page = NULL; |
| 1296 | |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 1297 | if (likely(new_page)) { |
| 1298 | prep_transhuge_page(new_page); |
| 1299 | } else { |
Hugh Dickins | eecc1e4 | 2014-01-12 01:25:21 -0800 | [diff] [blame] | 1300 | if (!page) { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1301 | split_huge_pmd(vma, vmf->pmd, vmf->address); |
Kirill A. Shutemov | e9b71ca | 2014-04-03 14:48:17 -0700 | [diff] [blame] | 1302 | ret |= VM_FAULT_FALLBACK; |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1303 | } else { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1304 | ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page); |
Kirill A. Shutemov | 9845cbb | 2014-02-25 15:01:42 -0800 | [diff] [blame] | 1305 | if (ret & VM_FAULT_OOM) { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1306 | split_huge_pmd(vma, vmf->pmd, vmf->address); |
Kirill A. Shutemov | 9845cbb | 2014-02-25 15:01:42 -0800 | [diff] [blame] | 1307 | ret |= VM_FAULT_FALLBACK; |
| 1308 | } |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1309 | put_page(page); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1310 | } |
David Rientjes | 17766dd | 2013-09-12 15:14:06 -0700 | [diff] [blame] | 1311 | count_vm_event(THP_FAULT_FALLBACK); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1312 | goto out; |
| 1313 | } |
| 1314 | |
Tejun Heo | 2cf8558 | 2018-07-03 11:14:56 -0400 | [diff] [blame] | 1315 | if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm, |
Michal Hocko | 2a70f6a | 2018-04-10 16:29:30 -0700 | [diff] [blame] | 1316 | huge_gfp, &memcg, true))) { |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1317 | put_page(new_page); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1318 | split_huge_pmd(vma, vmf->pmd, vmf->address); |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1319 | if (page) |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1320 | put_page(page); |
Kirill A. Shutemov | 9845cbb | 2014-02-25 15:01:42 -0800 | [diff] [blame] | 1321 | ret |= VM_FAULT_FALLBACK; |
David Rientjes | 17766dd | 2013-09-12 15:14:06 -0700 | [diff] [blame] | 1322 | count_vm_event(THP_FAULT_FALLBACK); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1323 | goto out; |
| 1324 | } |
| 1325 | |
David Rientjes | 17766dd | 2013-09-12 15:14:06 -0700 | [diff] [blame] | 1326 | count_vm_event(THP_FAULT_ALLOC); |
| 1327 | |
Hugh Dickins | eecc1e4 | 2014-01-12 01:25:21 -0800 | [diff] [blame] | 1328 | if (!page) |
Huang Ying | c79b57e | 2017-09-06 16:25:04 -0700 | [diff] [blame] | 1329 | clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1330 | else |
| 1331 | copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1332 | __SetPageUptodate(new_page); |
| 1333 | |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1334 | mmun_start = haddr; |
| 1335 | mmun_end = haddr + HPAGE_PMD_SIZE; |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1336 | mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1337 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1338 | spin_lock(vmf->ptl); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1339 | if (page) |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1340 | put_page(page); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1341 | if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { |
| 1342 | spin_unlock(vmf->ptl); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1343 | mem_cgroup_cancel_charge(new_page, memcg, true); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1344 | put_page(new_page); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1345 | goto out_mn; |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1346 | } else { |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1347 | pmd_t entry; |
Kirill A. Shutemov | 3122359 | 2013-09-12 15:14:01 -0700 | [diff] [blame] | 1348 | entry = mk_huge_pmd(new_page, vma->vm_page_prot); |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 1349 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1350 | pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1351 | page_add_new_anon_rmap(new_page, vma, haddr, true); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1352 | mem_cgroup_commit_charge(new_page, memcg, false, true); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1353 | lru_cache_add_active_or_unevictable(new_page, vma); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1354 | set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); |
| 1355 | update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); |
Hugh Dickins | eecc1e4 | 2014-01-12 01:25:21 -0800 | [diff] [blame] | 1356 | if (!page) { |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1357 | add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 1358 | } else { |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1359 | VM_BUG_ON_PAGE(!PageHead(page), page); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1360 | page_remove_rmap(page, true); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1361 | put_page(page); |
| 1362 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1363 | ret |= VM_FAULT_WRITE; |
| 1364 | } |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1365 | spin_unlock(vmf->ptl); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1366 | out_mn: |
Jérôme Glisse | 4645b9f | 2017-11-15 17:34:11 -0800 | [diff] [blame] | 1367 | /* |
| 1368 | * No need to double call mmu_notifier->invalidate_range() callback as |
| 1369 | * the above pmdp_huge_clear_flush_notify() did already call it. |
| 1370 | */ |
| 1371 | mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start, |
| 1372 | mmun_end); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1373 | out: |
| 1374 | return ret; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1375 | out_unlock: |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1376 | spin_unlock(vmf->ptl); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1377 | return ret; |
| 1378 | } |
| 1379 | |
Keno Fischer | 8310d48 | 2017-01-24 15:17:48 -0800 | [diff] [blame] | 1380 | /* |
| 1381 | * FOLL_FORCE can write to even unwritable pmd's, but only |
| 1382 | * after we've gone through a COW cycle and they are dirty. |
| 1383 | */ |
| 1384 | static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) |
| 1385 | { |
Linus Torvalds | f6f3732 | 2017-12-15 18:53:22 -0800 | [diff] [blame] | 1386 | return pmd_write(pmd) || |
Keno Fischer | 8310d48 | 2017-01-24 15:17:48 -0800 | [diff] [blame] | 1387 | ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); |
| 1388 | } |
| 1389 | |
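/*
 * follow_trans_huge_pmd() implements the get_user_pages() lookup for a huge
 * pmd, with the pmd lock held by the caller. It also takes care of
 * FOLL_TOUCH and of mlocking pmd-mapped THPs for FOLL_MLOCK callers.
 */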
David Rientjes | b676b29 | 2012-10-08 16:34:03 -0700 | [diff] [blame] | 1390 | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1391 | unsigned long addr, |
| 1392 | pmd_t *pmd, |
| 1393 | unsigned int flags) |
| 1394 | { |
David Rientjes | b676b29 | 2012-10-08 16:34:03 -0700 | [diff] [blame] | 1395 | struct mm_struct *mm = vma->vm_mm; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1396 | struct page *page = NULL; |
| 1397 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1398 | assert_spin_locked(pmd_lockptr(mm, pmd)); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1399 | |
Keno Fischer | 8310d48 | 2017-01-24 15:17:48 -0800 | [diff] [blame] | 1400 | if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1401 | goto out; |
| 1402 | |
Kirill A. Shutemov | 85facf2 | 2013-02-04 14:28:42 -0800 | [diff] [blame] | 1403 | /* Avoid dumping huge zero page */ |
| 1404 | if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) |
| 1405 | return ERR_PTR(-EFAULT); |
| 1406 | |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1407 | /* Full NUMA hinting faults to serialise migration in fault paths */ |
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 1408 | if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1409 | goto out; |
| 1410 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1411 | page = pmd_page(*pmd); |
Dan Williams | ca120cf | 2016-09-03 10:38:03 -0700 | [diff] [blame] | 1412 | VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 1413 | if (flags & FOLL_TOUCH) |
Kirill A. Shutemov | a8f9736 | 2017-11-27 06:21:25 +0300 | [diff] [blame] | 1414 | touch_pmd(vma, addr, pmd, flags); |
Eric B Munson | de60f5f | 2015-11-05 18:51:36 -0800 | [diff] [blame] | 1415 | if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 1416 | /* |
| 1417 | * We don't mlock() pte-mapped THPs. This way we can avoid |
| 1418 | * leaking mlocked pages into non-VM_LOCKED VMAs. |
| 1419 | * |
Kirill A. Shutemov | 9a73f61 | 2016-07-26 15:25:53 -0700 | [diff] [blame] | 1420 | * For anon THP: |
| 1421 | * |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 1422 | * In most cases the pmd is the only mapping of the page as we |
| 1423 | * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for |
| 1424 | * writable private mappings in populate_vma_page_range(). |
| 1425 | * |
| 1426 | * The only scenario in which the page is shared here is when we are
| 1427 | * mlocking a read-only mapping shared over fork(). We skip
| 1428 | * mlocking such pages.
Kirill A. Shutemov | 9a73f61 | 2016-07-26 15:25:53 -0700 | [diff] [blame] | 1429 | * |
| 1430 | * For file THP: |
| 1431 | * |
| 1432 | * We can expect PageDoubleMap() to be stable under page lock: |
| 1433 | * for file pages we set it in page_add_file_rmap(), which |
| 1434 | * requires page to be locked. |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 1435 | */ |
Kirill A. Shutemov | 9a73f61 | 2016-07-26 15:25:53 -0700 | [diff] [blame] | 1436 | |
| 1437 | if (PageAnon(page) && compound_mapcount(page) != 1) |
| 1438 | goto skip_mlock; |
| 1439 | if (PageDoubleMap(page) || !page->mapping) |
| 1440 | goto skip_mlock; |
| 1441 | if (!trylock_page(page)) |
| 1442 | goto skip_mlock; |
| 1443 | lru_add_drain(); |
| 1444 | if (page->mapping && !PageDoubleMap(page)) |
| 1445 | mlock_vma_page(page); |
| 1446 | unlock_page(page); |
David Rientjes | b676b29 | 2012-10-08 16:34:03 -0700 | [diff] [blame] | 1447 | } |
Kirill A. Shutemov | 9a73f61 | 2016-07-26 15:25:53 -0700 | [diff] [blame] | 1448 | skip_mlock: |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1449 | page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; |
Dan Williams | ca120cf | 2016-09-03 10:38:03 -0700 | [diff] [blame] | 1450 | VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1451 | if (flags & FOLL_GET) |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1452 | get_page(page); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1453 | |
| 1454 | out: |
| 1455 | return page; |
| 1456 | } |
| 1457 | |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1458 | /* NUMA hinting page fault entry point for trans huge pmds */ |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1459 | int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1460 | { |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1461 | struct vm_area_struct *vma = vmf->vma; |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1462 | struct anon_vma *anon_vma = NULL; |
Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1463 | struct page *page; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1464 | unsigned long haddr = vmf->address & HPAGE_PMD_MASK; |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1465 | int page_nid = -1, this_nid = numa_node_id(); |
Peter Zijlstra | 9057289 | 2013-10-07 11:29:20 +0100 | [diff] [blame] | 1466 | int target_nid, last_cpupid = -1; |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1467 | bool page_locked; |
| 1468 | bool migrated = false; |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 1469 | bool was_writable; |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 1470 | int flags = 0; |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1471 | |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1472 | vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); |
| 1473 | if (unlikely(!pmd_same(pmd, *vmf->pmd))) |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1474 | goto out_unlock; |
| 1475 | |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1476 | /* |
| 1477 | * If there are potential migrations, wait for completion and retry |
| 1478 | * without disrupting NUMA hinting information. Do not relock and |
| 1479 | * check_same as the page may no longer be mapped. |
| 1480 | */ |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1481 | if (unlikely(pmd_trans_migrating(*vmf->pmd))) { |
| 1482 | page = pmd_page(*vmf->pmd); |
Mark Rutland | 3c226c6 | 2017-06-16 14:02:34 -0700 | [diff] [blame] | 1483 | if (!get_page_unless_zero(page)) |
| 1484 | goto out_unlock; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1485 | spin_unlock(vmf->ptl); |
Mel Gorman | 5d83306 | 2015-02-12 14:58:16 -0800 | [diff] [blame] | 1486 | wait_on_page_locked(page); |
Mark Rutland | 3c226c6 | 2017-06-16 14:02:34 -0700 | [diff] [blame] | 1487 | put_page(page); |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1488 | goto out; |
| 1489 | } |
| 1490 | |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1491 | page = pmd_page(pmd); |
Mel Gorman | a1a4618 | 2013-10-07 11:28:50 +0100 | [diff] [blame] | 1492 | BUG_ON(is_huge_zero_page(page)); |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1493 | page_nid = page_to_nid(page); |
Peter Zijlstra | 9057289 | 2013-10-07 11:29:20 +0100 | [diff] [blame] | 1494 | last_cpupid = page_cpupid_last(page); |
Mel Gorman | 03c5a6e | 2012-11-02 14:52:48 +0000 | [diff] [blame] | 1495 | count_vm_numa_event(NUMA_HINT_FAULTS); |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 1496 | if (page_nid == this_nid) { |
Mel Gorman | 03c5a6e | 2012-11-02 14:52:48 +0000 | [diff] [blame] | 1497 | count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 1498 | flags |= TNF_FAULT_LOCAL; |
| 1499 | } |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 1500 | |
Mel Gorman | bea66fb | 2015-03-25 15:55:37 -0700 | [diff] [blame] | 1501 | /* See similar comment in do_numa_page for explanation */ |
Aneesh Kumar K.V | 288bc54 | 2017-02-24 14:59:16 -0800 | [diff] [blame] | 1502 | if (!pmd_savedwrite(pmd)) |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 1503 | flags |= TNF_NO_GROUP; |
| 1504 | |
| 1505 | /* |
Mel Gorman | ff9042b | 2013-10-07 11:28:43 +0100 | [diff] [blame] | 1506 | * Acquire the page lock to serialise THP migrations but avoid dropping |
| 1507 | * page_table_lock if at all possible |
| 1508 | */ |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1509 | page_locked = trylock_page(page); |
| 1510 | target_nid = mpol_misplaced(page, vma, haddr); |
| 1511 | if (target_nid == -1) { |
| 1512 | /* If the page was locked, there are no parallel migrations */ |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1513 | if (page_locked) |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1514 | goto clear_pmdnuma; |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1515 | } |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 1516 | |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1517 | /* Migration could have started since the pmd_trans_migrating check */ |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1518 | if (!page_locked) { |
Mark Rutland | 3c226c6 | 2017-06-16 14:02:34 -0700 | [diff] [blame] | 1519 | page_nid = -1; |
| 1520 | if (!get_page_unless_zero(page)) |
| 1521 | goto out_unlock; |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1522 | spin_unlock(vmf->ptl); |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1523 | wait_on_page_locked(page); |
Mark Rutland | 3c226c6 | 2017-06-16 14:02:34 -0700 | [diff] [blame] | 1524 | put_page(page); |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1525 | goto out; |
| 1526 | } |
| 1527 | |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1528 | /* |
| 1529 | * Page is misplaced. Page lock serialises migrations. Acquire anon_vma |
| 1530 | * to serialise splits.
| 1531 | */ |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1532 | get_page(page); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1533 | spin_unlock(vmf->ptl); |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1534 | anon_vma = page_lock_anon_vma_read(page); |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 1535 | |
Peter Zijlstra | c69307d | 2013-10-07 11:28:41 +0100 | [diff] [blame] | 1536 | /* Confirm the PMD did not change while page_table_lock was released */ |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1537 | spin_lock(vmf->ptl); |
| 1538 | if (unlikely(!pmd_same(pmd, *vmf->pmd))) { |
Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1539 | unlock_page(page); |
| 1540 | put_page(page); |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1541 | page_nid = -1; |
Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1542 | goto out_unlock; |
| 1543 | } |
Mel Gorman | ff9042b | 2013-10-07 11:28:43 +0100 | [diff] [blame] | 1544 | |
Mel Gorman | c3a489c | 2013-12-18 17:08:38 -0800 | [diff] [blame] | 1545 | /* Bail if we fail to protect against THP splits for any reason */ |
| 1546 | if (unlikely(!anon_vma)) { |
| 1547 | put_page(page); |
| 1548 | page_nid = -1; |
| 1549 | goto clear_pmdnuma; |
| 1550 | } |
| 1551 | |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1552 | /* |
Peter Zijlstra | 8b1b436 | 2017-06-07 18:05:07 +0200 | [diff] [blame] | 1553 | * Since we took the NUMA fault, we must have observed the !accessible |
| 1554 | * bit. Make sure all other CPUs agree with that, to avoid them |
| 1555 | * modifying the page we're about to migrate. |
| 1556 | * |
| 1557 | * Must be done under PTL such that we'll observe the relevant |
Peter Zijlstra | ccde85b | 2017-08-11 14:29:01 +0200 | [diff] [blame] | 1558 | * inc_tlb_flush_pending(). |
| 1559 | * |
| 1560 | * We are not sure a pending tlb flush here is for a huge page |
| 1561 | * mapping or not. Hence use the tlb range variant |
Peter Zijlstra | 8b1b436 | 2017-06-07 18:05:07 +0200 | [diff] [blame] | 1562 | */ |
| 1563 | if (mm_tlb_flush_pending(vma->vm_mm)) |
Peter Zijlstra | ccde85b | 2017-08-11 14:29:01 +0200 | [diff] [blame] | 1564 | flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); |
Peter Zijlstra | 8b1b436 | 2017-06-07 18:05:07 +0200 | [diff] [blame] | 1565 | |
| 1566 | /* |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1567 | * Migrate the THP to the requested node; this returns with the page
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 1568 | * unlocked and access rights restored.
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1569 | */ |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1570 | spin_unlock(vmf->ptl); |
Peter Zijlstra | 8b1b436 | 2017-06-07 18:05:07 +0200 | [diff] [blame] | 1571 | |
Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 1572 | migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1573 | vmf->pmd, pmd, vmf->address, page, target_nid); |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 1574 | if (migrated) { |
| 1575 | flags |= TNF_MIGRATED; |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1576 | page_nid = target_nid; |
Mel Gorman | 074c238 | 2015-03-25 15:55:42 -0700 | [diff] [blame] | 1577 | } else |
| 1578 | flags |= TNF_MIGRATE_FAIL; |
Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1579 | |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1580 | goto out; |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 1581 | clear_pmdnuma: |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1582 | BUG_ON(!PageLocked(page)); |
Aneesh Kumar K.V | 288bc54 | 2017-02-24 14:59:16 -0800 | [diff] [blame] | 1583 | was_writable = pmd_savedwrite(pmd); |
Mel Gorman | 4d94246 | 2015-02-12 14:58:28 -0800 | [diff] [blame] | 1584 | pmd = pmd_modify(pmd, vma->vm_page_prot); |
Mel Gorman | b7b0400 | 2015-03-25 15:55:45 -0700 | [diff] [blame] | 1585 | pmd = pmd_mkyoung(pmd); |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 1586 | if (was_writable) |
| 1587 | pmd = pmd_mkwrite(pmd); |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1588 | set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); |
| 1589 | update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1590 | unlock_page(page); |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1591 | out_unlock: |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1592 | spin_unlock(vmf->ptl); |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1593 | |
| 1594 | out: |
| 1595 | if (anon_vma) |
| 1596 | page_unlock_anon_vma_read(anon_vma); |
| 1597 | |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1598 | if (page_nid != -1) |
Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 1599 | task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, |
Aneesh Kumar K.V | 9a8b300 | 2017-02-24 14:59:56 -0800 | [diff] [blame] | 1600 | flags); |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1601 | |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1602 | return 0; |
| 1603 | } |
| 1604 | |
Huang Ying | 319904a | 2016-07-28 15:48:03 -0700 | [diff] [blame] | 1605 | /* |
| 1606 | * Return true if we do MADV_FREE successfully on the entire pmd page.
| 1607 | * Otherwise, return false. |
| 1608 | */ |
| 1609 | bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1610 | pmd_t *pmd, unsigned long addr, unsigned long next) |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1611 | { |
| 1612 | spinlock_t *ptl; |
| 1613 | pmd_t orig_pmd; |
| 1614 | struct page *page; |
| 1615 | struct mm_struct *mm = tlb->mm; |
Huang Ying | 319904a | 2016-07-28 15:48:03 -0700 | [diff] [blame] | 1616 | bool ret = false; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1617 | |
Aneesh Kumar K.V | 07e3266 | 2016-12-12 16:42:40 -0800 | [diff] [blame] | 1618 | tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); |
| 1619 | |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 1620 | ptl = pmd_trans_huge_lock(pmd, vma); |
| 1621 | if (!ptl) |
Linus Torvalds | 25eedab | 2016-01-17 18:33:15 -0800 | [diff] [blame] | 1622 | goto out_unlocked; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1623 | |
| 1624 | orig_pmd = *pmd; |
Huang Ying | 319904a | 2016-07-28 15:48:03 -0700 | [diff] [blame] | 1625 | if (is_huge_zero_pmd(orig_pmd)) |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1626 | goto out; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1627 | |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1628 | if (unlikely(!pmd_present(orig_pmd))) { |
| 1629 | VM_BUG_ON(thp_migration_supported() && |
| 1630 | !is_pmd_migration_entry(orig_pmd)); |
| 1631 | goto out; |
| 1632 | } |
| 1633 | |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1634 | page = pmd_page(orig_pmd); |
| 1635 | /* |
| 1636 | * If other processes are mapping this page, we can't discard
| 1637 | * the page unless they all do MADV_FREE, so let's skip the page.
| 1638 | */ |
| 1639 | if (page_mapcount(page) != 1) |
| 1640 | goto out; |
| 1641 | |
| 1642 | if (!trylock_page(page)) |
| 1643 | goto out; |
| 1644 | |
| 1645 | /* |
| 1646 | * If the user wants to discard part of the THP pages, split it so MADV_FREE
| 1647 | * will deactivate only them. |
| 1648 | */ |
| 1649 | if (next - addr != HPAGE_PMD_SIZE) { |
| 1650 | get_page(page); |
| 1651 | spin_unlock(ptl); |
Huang Ying | 9818b8c | 2016-07-14 12:07:12 -0700 | [diff] [blame] | 1652 | split_huge_page(page); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1653 | unlock_page(page); |
Kirill A. Shutemov | bbf29ff | 2017-07-06 15:35:28 -0700 | [diff] [blame] | 1654 | put_page(page); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1655 | goto out_unlocked; |
| 1656 | } |
| 1657 | |
| 1658 | if (PageDirty(page)) |
| 1659 | ClearPageDirty(page); |
| 1660 | unlock_page(page); |
| 1661 | |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1662 | if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { |
Kirill A. Shutemov | 58ceeb6 | 2017-04-13 14:56:26 -0700 | [diff] [blame] | 1663 | pmdp_invalidate(vma, addr, pmd); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1664 | orig_pmd = pmd_mkold(orig_pmd); |
| 1665 | orig_pmd = pmd_mkclean(orig_pmd); |
| 1666 | |
| 1667 | set_pmd_at(mm, addr, pmd, orig_pmd); |
| 1668 | tlb_remove_pmd_tlb_entry(tlb, pmd, addr); |
| 1669 | } |
Shaohua Li | 802a3a9 | 2017-05-03 14:52:32 -0700 | [diff] [blame] | 1670 | |
| 1671 | mark_page_lazyfree(page); |
Huang Ying | 319904a | 2016-07-28 15:48:03 -0700 | [diff] [blame] | 1672 | ret = true; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1673 | out: |
| 1674 | spin_unlock(ptl); |
| 1675 | out_unlocked: |
| 1676 | return ret; |
| 1677 | } |
| 1678 | |
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 1679 | static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) |
| 1680 | { |
| 1681 | pgtable_t pgtable; |
| 1682 | |
| 1683 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
| 1684 | pte_free(mm, pgtable); |
Kirill A. Shutemov | c481290 | 2017-11-15 17:35:37 -0800 | [diff] [blame] | 1685 | mm_dec_nr_ptes(mm); |
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 1686 | } |
| 1687 | |
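/*
 * zap_huge_pmd() tears down one huge pmd for unmap/exit: the entry is
 * cleared under the page table lock and the backing page, the deposited page
 * table and the rmap/counters are released as appropriate for DAX, huge zero
 * page, anonymous and file mappings.
 */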
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1688 | int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
Shaohua Li | f21760b | 2012-01-12 17:19:16 -0800 | [diff] [blame] | 1689 | pmd_t *pmd, unsigned long addr) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1690 | { |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1691 | pmd_t orig_pmd; |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1692 | spinlock_t *ptl; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1693 | |
Aneesh Kumar K.V | 07e3266 | 2016-12-12 16:42:40 -0800 | [diff] [blame] | 1694 | tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); |
| 1695 | |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 1696 | ptl = __pmd_trans_huge_lock(pmd, vma); |
| 1697 | if (!ptl) |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1698 | return 0; |
| 1699 | /* |
| 1700 | * For architectures like ppc64 we look at the deposited pgtable
| 1701 | * when calling pmdp_huge_get_and_clear, so do the
| 1702 | * pgtable_trans_huge_withdraw after finishing the pmdp-related
| 1703 | * operations.
| 1704 | */ |
| 1705 | orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, |
| 1706 | tlb->fullmm); |
| 1707 | tlb_remove_pmd_tlb_entry(tlb, pmd, addr); |
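| | /*
| | * Three cases follow: DAX mappings, the huge zero page, and a
| | * normally mapped THP (or a THP migration entry).
| | */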
| 1708 | if (vma_is_dax(vma)) { |
Oliver O'Halloran | 3b6521f | 2017-05-08 15:59:43 -0700 | [diff] [blame] | 1709 | if (arch_needs_pgtable_deposit()) |
| 1710 | zap_deposited_table(tlb->mm, pmd); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1711 | spin_unlock(ptl); |
| 1712 | if (is_huge_zero_pmd(orig_pmd)) |
Aneesh Kumar K.V | c0f2e17 | 2016-12-12 16:42:31 -0800 | [diff] [blame] | 1713 | tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1714 | } else if (is_huge_zero_pmd(orig_pmd)) { |
Oliver O'Halloran | c14a6eb | 2017-05-08 15:59:40 -0700 | [diff] [blame] | 1715 | zap_deposited_table(tlb->mm, pmd); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1716 | spin_unlock(ptl); |
Aneesh Kumar K.V | c0f2e17 | 2016-12-12 16:42:31 -0800 | [diff] [blame] | 1717 | tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1718 | } else { |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 1719 | struct page *page = NULL; |
| 1720 | int flush_needed = 1; |
| 1721 | |
| 1722 | if (pmd_present(orig_pmd)) { |
| 1723 | page = pmd_page(orig_pmd); |
| 1724 | page_remove_rmap(page, true); |
| 1725 | VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); |
| 1726 | VM_BUG_ON_PAGE(!PageHead(page), page); |
| 1727 | } else if (thp_migration_supported()) { |
| 1728 | swp_entry_t entry; |
| 1729 | |
| 1730 | VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); |
| 1731 | entry = pmd_to_swp_entry(orig_pmd); |
| 1732 | page = pfn_to_page(swp_offset(entry)); |
| 1733 | flush_needed = 0; |
| 1734 | } else |
| 1735 | WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); |
| 1736 | |
Kirill A. Shutemov | b507238 | 2016-07-26 15:25:34 -0700 | [diff] [blame] | 1737 | if (PageAnon(page)) { |
Oliver O'Halloran | c14a6eb | 2017-05-08 15:59:40 -0700 | [diff] [blame] | 1738 | zap_deposited_table(tlb->mm, pmd); |
Kirill A. Shutemov | b507238 | 2016-07-26 15:25:34 -0700 | [diff] [blame] | 1739 | add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); |
| 1740 | } else { |
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 1741 | if (arch_needs_pgtable_deposit()) |
| 1742 | zap_deposited_table(tlb->mm, pmd); |
Kirill A. Shutemov | b507238 | 2016-07-26 15:25:34 -0700 | [diff] [blame] | 1743 | add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR); |
| 1744 | } |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 1745 | |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1746 | spin_unlock(ptl); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 1747 | if (flush_needed) |
| 1748 | tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1749 | } |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1750 | return 1; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1751 | } |
| 1752 | |
Aneesh Kumar K.V | 1dd38b6 | 2016-12-12 16:44:29 -0800 | [diff] [blame] | 1753 | #ifndef pmd_move_must_withdraw |
| 1754 | static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, |
| 1755 | spinlock_t *old_pmd_ptl, |
| 1756 | struct vm_area_struct *vma) |
| 1757 | { |
| 1758 | /* |
| 1759 | * With the split pmd lock we also need to move the preallocated
| 1760 | * PTE page table if new_pmd is on a different PMD page table.
| 1761 | * |
| 1762 | * We also don't deposit and withdraw tables for file pages. |
| 1763 | */ |
| 1764 | return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); |
| 1765 | } |
| 1766 | #endif |
| 1767 | |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 1768 | static pmd_t move_soft_dirty_pmd(pmd_t pmd) |
| 1769 | { |
| 1770 | #ifdef CONFIG_MEM_SOFT_DIRTY |
| 1771 | if (unlikely(is_pmd_migration_entry(pmd))) |
| 1772 | pmd = pmd_swp_mksoft_dirty(pmd); |
| 1773 | else if (pmd_present(pmd)) |
| 1774 | pmd = pmd_mksoft_dirty(pmd); |
| 1775 | #endif |
| 1776 | return pmd; |
| 1777 | } |
| 1778 | |
Hugh Dickins | bf8616d | 2016-05-19 17:12:54 -0700 | [diff] [blame] | 1779 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1780 | unsigned long new_addr, unsigned long old_end, |
Aaron Lu | 5d19042 | 2016-11-10 17:16:33 +0800 | [diff] [blame] | 1781 | pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1782 | { |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1783 | spinlock_t *old_ptl, *new_ptl; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1784 | pmd_t pmd; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1785 | struct mm_struct *mm = vma->vm_mm; |
Aaron Lu | 5d19042 | 2016-11-10 17:16:33 +0800 | [diff] [blame] | 1786 | bool force_flush = false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1787 | |
| 1788 | if ((old_addr & ~HPAGE_PMD_MASK) || |
| 1789 | (new_addr & ~HPAGE_PMD_MASK) || |
Hugh Dickins | bf8616d | 2016-05-19 17:12:54 -0700 | [diff] [blame] | 1790 | old_end - old_addr < HPAGE_PMD_SIZE) |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1791 | return false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1792 | |
| 1793 | /* |
| 1794 | * The destination pmd shouldn't be established; free_pgtables()
| 1795 | * should have released it.
| 1796 | */ |
| 1797 | if (WARN_ON(!pmd_none(*new_pmd))) { |
| 1798 | VM_BUG_ON(pmd_trans_huge(*new_pmd)); |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1799 | return false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1800 | } |
| 1801 | |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1802 | /* |
| 1803 | * We don't have to worry about the ordering of src and dst |
| 1804 | * ptlocks because exclusive mmap_sem prevents deadlock. |
| 1805 | */ |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 1806 | old_ptl = __pmd_trans_huge_lock(old_pmd, vma); |
| 1807 | if (old_ptl) { |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1808 | new_ptl = pmd_lockptr(mm, new_pmd); |
| 1809 | if (new_ptl != old_ptl) |
| 1810 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1811 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); |
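| | /*
| | * If the old pmd was dirty, writes may still be possible through a
| | * stale TLB entry, so force a flush (below) before the move becomes
| | * visible; otherwise the deferred flush requested by the caller is
| | * sufficient.
| | */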
Aaron Lu | a2ce266 | 2016-11-29 13:27:31 +0800 | [diff] [blame] | 1812 | if (pmd_present(pmd) && pmd_dirty(pmd)) |
| 1813 | force_flush = true; |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1814 | VM_BUG_ON(!pmd_none(*new_pmd)); |
Kirill A. Shutemov | 3592806 | 2013-12-12 17:12:33 -0800 | [diff] [blame] | 1815 | |
Aneesh Kumar K.V | 1dd38b6 | 2016-12-12 16:44:29 -0800 | [diff] [blame] | 1816 | if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { |
Aneesh Kumar K.V | b3084f4 | 2014-01-13 11:34:24 +0530 | [diff] [blame] | 1817 | pgtable_t pgtable; |
Kirill A. Shutemov | 3592806 | 2013-12-12 17:12:33 -0800 | [diff] [blame] | 1818 | pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); |
| 1819 | pgtable_trans_huge_deposit(mm, new_pmd, pgtable); |
Kirill A. Shutemov | 3592806 | 2013-12-12 17:12:33 -0800 | [diff] [blame] | 1820 | } |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 1821 | pmd = move_soft_dirty_pmd(pmd); |
| 1822 | set_pmd_at(mm, new_addr, new_pmd, pmd); |
Aneesh Kumar K.V | b3084f4 | 2014-01-13 11:34:24 +0530 | [diff] [blame] | 1823 | if (new_ptl != old_ptl) |
| 1824 | spin_unlock(new_ptl); |
Aaron Lu | 5d19042 | 2016-11-10 17:16:33 +0800 | [diff] [blame] | 1825 | if (force_flush) |
| 1826 | flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); |
| 1827 | else |
| 1828 | *need_flush = true; |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1829 | spin_unlock(old_ptl); |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1830 | return true; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1831 | } |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1832 | return false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1833 | } |
| 1834 | |
Mel Gorman | f123d74 | 2013-10-07 11:28:49 +0100 | [diff] [blame] | 1835 | /* |
| 1836 | * Returns |
| 1837 | * - 0 if PMD could not be locked |
| 1838 | * - 1 if PMD was locked but protections are unchanged and TLB flush is unnecessary
| 1839 | * - HPAGE_PMD_NR if protections changed and TLB flush is necessary
| 1840 | */ |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 1841 | int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
Mel Gorman | e944fd6 | 2015-02-12 14:58:35 -0800 | [diff] [blame] | 1842 | unsigned long addr, pgprot_t newprot, int prot_numa) |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 1843 | { |
| 1844 | struct mm_struct *mm = vma->vm_mm; |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1845 | spinlock_t *ptl; |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 1846 | pmd_t entry; |
| 1847 | bool preserve_write; |
| 1848 | int ret; |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 1849 | |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 1850 | ptl = __pmd_trans_huge_lock(pmd, vma); |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 1851 | if (!ptl) |
| 1852 | return 0; |
Mel Gorman | e944fd6 | 2015-02-12 14:58:35 -0800 | [diff] [blame] | 1853 | |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 1854 | preserve_write = prot_numa && pmd_write(*pmd); |
| 1855 | ret = 1; |
Mel Gorman | e944fd6 | 2015-02-12 14:58:35 -0800 | [diff] [blame] | 1856 | |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1857 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
| 1858 | if (is_swap_pmd(*pmd)) { |
| 1859 | swp_entry_t entry = pmd_to_swp_entry(*pmd); |
| 1860 | |
| 1861 | VM_BUG_ON(!is_pmd_migration_entry(*pmd)); |
| 1862 | if (is_write_migration_entry(entry)) { |
| 1863 | pmd_t newpmd; |
| 1864 | /* |
| 1865 | * A protection check is difficult so |
| 1866 | * just be safe and disable write |
| 1867 | */ |
| 1868 | make_migration_entry_read(&entry); |
| 1869 | newpmd = swp_entry_to_pmd(entry); |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 1870 | if (pmd_swp_soft_dirty(*pmd)) |
| 1871 | newpmd = pmd_swp_mksoft_dirty(newpmd); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1872 | set_pmd_at(mm, addr, pmd, newpmd); |
| 1873 | } |
| 1874 | goto unlock; |
| 1875 | } |
| 1876 | #endif |
| 1877 | |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 1878 | /* |
| 1879 | * Avoid trapping faults against the zero page. The read-only |
| 1880 | * data is likely to be read-cached on the local CPU and |
| 1881 | * local/remote hits to the zero page are not interesting. |
| 1882 | */ |
| 1883 | if (prot_numa && is_huge_zero_pmd(*pmd)) |
| 1884 | goto unlock; |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 1885 | |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 1886 | if (prot_numa && pmd_protnone(*pmd)) |
| 1887 | goto unlock; |
| 1888 | |
Kirill A. Shutemov | ced1080 | 2017-04-13 14:56:20 -0700 | [diff] [blame] | 1889 | /* |
| 1890 | * In the prot_numa case, we are under down_read(mmap_sem). It's critical
| 1891 | * not to clear the pmd intermittently, to avoid racing with MADV_DONTNEED,
| 1892 | * which is also under down_read(mmap_sem):
| 1893 | * |
| 1894 | * CPU0: CPU1: |
| 1895 | * change_huge_pmd(prot_numa=1) |
| 1896 | * pmdp_huge_get_and_clear_notify() |
| 1897 | * madvise_dontneed() |
| 1898 | * zap_pmd_range() |
| 1899 | * pmd_trans_huge(*pmd) == 0 (without ptl) |
| 1900 | * // skip the pmd |
| 1901 | * set_pmd_at(); |
| 1902 | * // pmd is re-established |
| 1903 | * |
| 1904 | * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
| 1905 | * which may break userspace.
| 1906 | * |
| 1907 | * pmdp_invalidate() is required to make sure we don't miss |
| 1908 | * dirty/young flags set by hardware. |
| 1909 | */ |
Kirill A. Shutemov | a3cf988 | 2018-01-31 16:18:20 -0800 | [diff] [blame] | 1910 | entry = pmdp_invalidate(vma, addr, pmd); |
Kirill A. Shutemov | ced1080 | 2017-04-13 14:56:20 -0700 | [diff] [blame] | 1911 | |
Kirill A. Shutemov | 0a85e51d | 2017-04-13 14:56:17 -0700 | [diff] [blame] | 1912 | entry = pmd_modify(entry, newprot); |
| 1913 | if (preserve_write) |
| 1914 | entry = pmd_mk_savedwrite(entry); |
| 1915 | ret = HPAGE_PMD_NR; |
| 1916 | set_pmd_at(mm, addr, pmd, entry); |
| 1917 | BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); |
| 1918 | unlock: |
| 1919 | spin_unlock(ptl); |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 1920 | return ret; |
| 1921 | } |
| 1922 | |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1923 | /* |
Huang Ying | 8f19b0c | 2016-07-26 15:27:04 -0700 | [diff] [blame] | 1924 | * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1925 | * |
Huang Ying | 8f19b0c | 2016-07-26 15:27:04 -0700 | [diff] [blame] | 1926 | * Note that if it returns the page table lock pointer, this routine returns without
| 1927 | * unlocking the page table lock. So callers must unlock it.
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1928 | */ |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 1929 | spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1930 | { |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 1931 | spinlock_t *ptl; |
| 1932 | ptl = pmd_lock(vma->vm_mm, pmd); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1933 | if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || |
| 1934 | pmd_devmap(*pmd))) |
Kirill A. Shutemov | b6ec57f | 2016-01-21 16:40:25 -0800 | [diff] [blame] | 1935 | return ptl; |
| 1936 | spin_unlock(ptl); |
| 1937 | return NULL; |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1938 | } |
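| | /*
| | * Minimal usage sketch (mirrors callers such as zap_huge_pmd() above):
| | *
| | *	ptl = __pmd_trans_huge_lock(pmd, vma);
| | *	if (!ptl)
| | *		return 0;
| | *	... work on the huge (or swap/devmap) pmd ...
| | *	spin_unlock(ptl);
| | */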
| 1939 | |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1940 | /* |
| 1941 | * Returns true if a given pud maps a thp, false otherwise. |
| 1942 | * |
| 1943 | * Note that if it returns true, this routine returns without unlocking page |
| 1944 | * table lock. So callers must unlock it. |
| 1945 | */ |
| 1946 | spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) |
| 1947 | { |
| 1948 | spinlock_t *ptl; |
| 1949 | |
| 1950 | ptl = pud_lock(vma->vm_mm, pud); |
| 1951 | if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) |
| 1952 | return ptl; |
| 1953 | spin_unlock(ptl); |
| 1954 | return NULL; |
| 1955 | } |
| 1956 | |
| 1957 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
| 1958 | int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, |
| 1959 | pud_t *pud, unsigned long addr) |
| 1960 | { |
| 1961 | pud_t orig_pud; |
| 1962 | spinlock_t *ptl; |
| 1963 | |
| 1964 | ptl = __pud_trans_huge_lock(pud, vma); |
| 1965 | if (!ptl) |
| 1966 | return 0; |
| 1967 | /* |
| 1968 | * For architectures like ppc64 we look at deposited pgtable |
| 1969 | * when calling pudp_huge_get_and_clear. So do the |
| 1970 | * pgtable_trans_huge_withdraw after finishing pudp related |
| 1971 | * operations. |
| 1972 | */ |
| 1973 | orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud, |
| 1974 | tlb->fullmm); |
| 1975 | tlb_remove_pud_tlb_entry(tlb, pud, addr); |
| 1976 | if (vma_is_dax(vma)) { |
| 1977 | spin_unlock(ptl); |
| 1978 | /* No zero page support yet */ |
| 1979 | } else { |
| 1980 | /* No support for anonymous PUD pages yet */ |
| 1981 | BUG(); |
| 1982 | } |
| 1983 | return 1; |
| 1984 | } |
| 1985 | |
| 1986 | static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, |
| 1987 | unsigned long haddr) |
| 1988 | { |
| 1989 | VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); |
| 1990 | VM_BUG_ON_VMA(vma->vm_start > haddr, vma); |
| 1991 | VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); |
| 1992 | VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); |
| 1993 | |
Yisheng Xie | ce9311c | 2017-03-09 16:17:00 -0800 | [diff] [blame] | 1994 | count_vm_event(THP_SPLIT_PUD); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 1995 | |
| 1996 | pudp_huge_clear_flush_notify(vma, haddr, pud); |
| 1997 | } |
| 1998 | |
| 1999 | void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, |
| 2000 | unsigned long address) |
| 2001 | { |
| 2002 | spinlock_t *ptl; |
| 2003 | struct mm_struct *mm = vma->vm_mm; |
| 2004 | unsigned long haddr = address & HPAGE_PUD_MASK; |
| 2005 | |
| 2006 | mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE); |
| 2007 | ptl = pud_lock(mm, pud); |
| 2008 | if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) |
| 2009 | goto out; |
| 2010 | __split_huge_pud_locked(vma, pud, haddr); |
| 2011 | |
| 2012 | out: |
| 2013 | spin_unlock(ptl); |
Jérôme Glisse | 4645b9f | 2017-11-15 17:34:11 -0800 | [diff] [blame] | 2014 | /* |
| 2015 | * No need to double call mmu_notifier->invalidate_range() callback as |
| 2016 | * the above pudp_huge_clear_flush_notify() did already call it. |
| 2017 | */ |
| 2018 | mmu_notifier_invalidate_range_only_end(mm, haddr, haddr + |
| 2019 | HPAGE_PUD_SIZE); |
Matthew Wilcox | a00cc7d | 2017-02-24 14:57:02 -0800 | [diff] [blame] | 2020 | } |
| 2021 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
| 2022 | |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2023 | static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, |
| 2024 | unsigned long haddr, pmd_t *pmd) |
| 2025 | { |
| 2026 | struct mm_struct *mm = vma->vm_mm; |
| 2027 | pgtable_t pgtable; |
| 2028 | pmd_t _pmd; |
| 2029 | int i; |
| 2030 | |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 2031 | /* |
| 2032 | * Leave the pmd empty until the ptes are filled. Note that it is fine to delay
| 2033 | * notification until mmu_notifier_invalidate_range_end() as we are |
| 2034 | * replacing a zero pmd write protected page with a zero pte write |
| 2035 | * protected page. |
| 2036 | * |
Mike Rapoport | ad56b73 | 2018-03-21 21:22:47 +0200 | [diff] [blame] | 2037 | * See Documentation/vm/mmu_notifier.rst |
Jérôme Glisse | 0f10851 | 2017-11-15 17:34:07 -0800 | [diff] [blame] | 2038 | */ |
| 2039 | pmdp_huge_clear_flush(vma, haddr, pmd); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2040 | |
| 2041 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
| 2042 | pmd_populate(mm, &_pmd, pgtable); |
| 2043 | |
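| | /*
| | * Fill the withdrawn page table with special ptes that all point at
| | * the shared zero page, then reconnect it to the pmd below.
| | */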
| 2044 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
| 2045 | pte_t *pte, entry; |
| 2046 | entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); |
| 2047 | entry = pte_mkspecial(entry); |
| 2048 | pte = pte_offset_map(&_pmd, haddr); |
| 2049 | VM_BUG_ON(!pte_none(*pte)); |
| 2050 | set_pte_at(mm, haddr, pte, entry); |
| 2051 | pte_unmap(pte); |
| 2052 | } |
| 2053 | smp_wmb(); /* make pte visible before pmd */ |
| 2054 | pmd_populate(mm, pmd, pgtable); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2055 | } |
| 2056 | |
| 2057 | static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2058 | unsigned long haddr, bool freeze) |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2059 | { |
| 2060 | struct mm_struct *mm = vma->vm_mm; |
| 2061 | struct page *page; |
| 2062 | pgtable_t pgtable; |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2063 | pmd_t old_pmd, _pmd; |
Kirill A. Shutemov | a3cf988 | 2018-01-31 16:18:20 -0800 | [diff] [blame] | 2064 | bool young, write, soft_dirty, pmd_migration = false; |
Kirill A. Shutemov | 2ac015e | 2016-02-24 18:58:03 +0300 | [diff] [blame] | 2065 | unsigned long addr; |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2066 | int i; |
| 2067 | |
| 2068 | VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); |
| 2069 | VM_BUG_ON_VMA(vma->vm_start > haddr, vma); |
| 2070 | VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2071 | VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) |
| 2072 | && !pmd_devmap(*pmd)); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2073 | |
| 2074 | count_vm_event(THP_SPLIT_PMD); |
| 2075 | |
Kirill A. Shutemov | d21b9e5 | 2016-07-26 15:25:37 -0700 | [diff] [blame] | 2076 | if (!vma_is_anonymous(vma)) { |
| 2077 | _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); |
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 2078 | /* |
| 2079 | * We are going to unmap this huge page. So |
| 2080 | * just go ahead and zap it |
| 2081 | */ |
| 2082 | if (arch_needs_pgtable_deposit()) |
| 2083 | zap_deposited_table(mm, pmd); |
Kirill A. Shutemov | d21b9e5 | 2016-07-26 15:25:37 -0700 | [diff] [blame] | 2084 | if (vma_is_dax(vma)) |
| 2085 | return; |
| 2086 | page = pmd_page(_pmd); |
Hugh Dickins | e1f1b15 | 2018-07-20 17:53:45 -0700 | [diff] [blame] | 2087 | if (!PageDirty(page) && pmd_dirty(_pmd)) |
| 2088 | set_page_dirty(page); |
Kirill A. Shutemov | d21b9e5 | 2016-07-26 15:25:37 -0700 | [diff] [blame] | 2089 | if (!PageReferenced(page) && pmd_young(_pmd)) |
| 2090 | SetPageReferenced(page); |
| 2091 | page_remove_rmap(page, true); |
| 2092 | put_page(page); |
| 2093 | add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2094 | return; |
| 2095 | } else if (is_huge_zero_pmd(*pmd)) { |
Jérôme Glisse | 4645b9f | 2017-11-15 17:34:11 -0800 | [diff] [blame] | 2096 | /* |
| 2097 | * FIXME: Do we want to invalidate secondary mmu by calling |
| 2098 | * mmu_notifier_invalidate_range() see comments below inside |
| 2099 | * __split_huge_pmd() ? |
| 2100 | * |
| 2101 | * We are going from a write-protected huge zero page to
| 2102 | * write-protected small zero pages, so it does not seem useful
| 2103 | * to invalidate the secondary mmu at this time.
| 2104 | */ |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2105 | return __split_huge_zero_page_pmd(vma, haddr, pmd); |
| 2106 | } |
| 2107 | |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2108 | /* |
| 2109 | * Up to this point the pmd is present and huge and userland has the |
| 2110 | * whole access to the hugepage during the split (which happens in |
| 2111 | * place). If we overwrite the pmd with the not-huge version pointing |
| 2112 | * to the pte here (which of course we could if all CPUs were bug |
| 2113 | * free), userland could trigger a small page size TLB miss on the |
| 2114 | * small sized TLB while the hugepage TLB entry is still established in |
| 2115 | * the huge TLB. Some CPUs don't like that.
| 2116 | * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
| 2117 | * 383 on page 93. Intel should be safe but also warns that it's
| 2118 | * only safe if the permission and cache attributes of the two entries
| 2119 | * loaded in the two TLBs are identical (which should be the case here).
| 2120 | * But it is generally safer to never allow small and huge TLB entries |
| 2121 | * for the same virtual address to be loaded simultaneously. So instead |
| 2122 | * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the |
| 2123 | * current pmd notpresent (atomically because here the pmd_trans_huge |
| 2124 | * must remain set at all times on the pmd until the split is complete |
| 2125 | * for this pmd), then we flush the SMP TLB and finally we write the |
| 2126 | * non-huge version of the pmd entry with pmd_populate. |
| 2127 | */ |
| 2128 | old_pmd = pmdp_invalidate(vma, haddr, pmd); |
| 2129 | |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2130 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2131 | pmd_migration = is_pmd_migration_entry(old_pmd); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2132 | if (pmd_migration) { |
| 2133 | swp_entry_t entry; |
| 2134 | |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2135 | entry = pmd_to_swp_entry(old_pmd); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2136 | page = pfn_to_page(swp_offset(entry)); |
| 2137 | } else |
| 2138 | #endif |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2139 | page = pmd_page(old_pmd); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2140 | VM_BUG_ON_PAGE(!page_count(page), page); |
Joonsoo Kim | fe896d1 | 2016-03-17 14:19:26 -0700 | [diff] [blame] | 2141 | page_ref_add(page, HPAGE_PMD_NR - 1); |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2142 | if (pmd_dirty(old_pmd)) |
| 2143 | SetPageDirty(page); |
| 2144 | write = pmd_write(old_pmd); |
| 2145 | young = pmd_young(old_pmd); |
| 2146 | soft_dirty = pmd_soft_dirty(old_pmd); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2147 | |
Aneesh Kumar K.V | 423ac9a | 2018-01-31 16:18:24 -0800 | [diff] [blame] | 2148 | /* |
| 2149 | * Withdraw the table only after we mark the pmd entry invalid. |
| 2150 | * This is critical for some architectures (Power).
| 2151 | */ |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2152 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
| 2153 | pmd_populate(mm, &_pmd, pgtable); |
| 2154 | |
Kirill A. Shutemov | 2ac015e | 2016-02-24 18:58:03 +0300 | [diff] [blame] | 2155 | for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2156 | pte_t entry, *pte; |
| 2157 | /* |
| 2158 | * Note that NUMA hinting access restrictions are not |
| 2159 | * transferred to avoid any possibility of altering |
| 2160 | * permissions across VMAs. |
| 2161 | */ |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2162 | if (freeze || pmd_migration) { |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2163 | swp_entry_t swp_entry; |
| 2164 | swp_entry = make_migration_entry(page + i, write); |
| 2165 | entry = swp_entry_to_pte(swp_entry); |
Andrea Arcangeli | 804dd15 | 2016-08-25 15:16:57 -0700 | [diff] [blame] | 2166 | if (soft_dirty) |
| 2167 | entry = pte_swp_mksoft_dirty(entry); |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2168 | } else { |
Andrea Arcangeli | 6d2329f | 2016-10-07 17:01:22 -0700 | [diff] [blame] | 2169 | entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2170 | entry = maybe_mkwrite(entry, vma); |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2171 | if (!write) |
| 2172 | entry = pte_wrprotect(entry); |
| 2173 | if (!young) |
| 2174 | entry = pte_mkold(entry); |
Andrea Arcangeli | 804dd15 | 2016-08-25 15:16:57 -0700 | [diff] [blame] | 2175 | if (soft_dirty) |
| 2176 | entry = pte_mksoft_dirty(entry); |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2177 | } |
Kirill A. Shutemov | 2ac015e | 2016-02-24 18:58:03 +0300 | [diff] [blame] | 2178 | pte = pte_offset_map(&_pmd, addr); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2179 | BUG_ON(!pte_none(*pte)); |
Kirill A. Shutemov | 2ac015e | 2016-02-24 18:58:03 +0300 | [diff] [blame] | 2180 | set_pte_at(mm, addr, pte, entry); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2181 | atomic_inc(&page[i]._mapcount); |
| 2182 | pte_unmap(pte); |
| 2183 | } |
| 2184 | |
| 2185 | /* |
| 2186 | * Set PG_double_map before dropping compound_mapcount to avoid |
| 2187 | * false-negative page_mapped(). |
| 2188 | */ |
| 2189 | if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { |
| 2190 | for (i = 0; i < HPAGE_PMD_NR; i++) |
| 2191 | atomic_inc(&page[i]._mapcount); |
| 2192 | } |
| 2193 | |
| 2194 | if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { |
| 2195 | /* Last compound_mapcount is gone. */ |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 2196 | __dec_node_page_state(page, NR_ANON_THPS); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2197 | if (TestClearPageDoubleMap(page)) { |
| 2198 | /* No need in mapcount reference anymore */ |
| 2199 | for (i = 0; i < HPAGE_PMD_NR; i++) |
| 2200 | atomic_dec(&page[i]._mapcount); |
| 2201 | } |
| 2202 | } |
| 2203 | |
| 2204 | smp_wmb(); /* make pte visible before pmd */ |
| 2205 | pmd_populate(mm, pmd, pgtable); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2206 | |
| 2207 | if (freeze) { |
Kirill A. Shutemov | 2ac015e | 2016-02-24 18:58:03 +0300 | [diff] [blame] | 2208 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2209 | page_remove_rmap(page + i, false); |
| 2210 | put_page(page + i); |
| 2211 | } |
| 2212 | } |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2213 | } |
| 2214 | |
| 2215 | void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
Naoya Horiguchi | 33f4751 | 2016-07-14 12:07:32 -0700 | [diff] [blame] | 2216 | unsigned long address, bool freeze, struct page *page) |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2217 | { |
| 2218 | spinlock_t *ptl; |
| 2219 | struct mm_struct *mm = vma->vm_mm; |
| 2220 | unsigned long haddr = address & HPAGE_PMD_MASK; |
| 2221 | |
| 2222 | mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); |
| 2223 | ptl = pmd_lock(mm, pmd); |
Naoya Horiguchi | 33f4751 | 2016-07-14 12:07:32 -0700 | [diff] [blame] | 2224 | |
| 2225 | /* |
| 2226 | * If the caller asks to set up migration entries, we need a page to check
| 2227 | * the pmd against. Otherwise we can end up replacing the wrong page.
| 2228 | */ |
| 2229 | VM_BUG_ON(freeze && !page); |
| 2230 | if (page && page != pmd_page(*pmd)) |
| 2231 | goto out; |
| 2232 | |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame] | 2233 | if (pmd_trans_huge(*pmd)) { |
Naoya Horiguchi | 33f4751 | 2016-07-14 12:07:32 -0700 | [diff] [blame] | 2234 | page = pmd_page(*pmd); |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame] | 2235 | if (PageMlocked(page)) |
Kirill A. Shutemov | 5f73771 | 2016-03-17 14:20:13 -0700 | [diff] [blame] | 2236 | clear_page_mlock(page); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2237 | } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 2238 | goto out; |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2239 | __split_huge_pmd_locked(vma, pmd, haddr, freeze); |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 2240 | out: |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2241 | spin_unlock(ptl); |
Jérôme Glisse | 4645b9f | 2017-11-15 17:34:11 -0800 | [diff] [blame] | 2242 | /* |
| 2243 | * No need to double call mmu_notifier->invalidate_range() callback. |
| 2244 | * There are 3 cases to consider inside __split_huge_pmd_locked():
| 2245 | * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(), which is obvious
| 2246 | * 2) __split_huge_zero_page_pmd() reads only the zero page, and any write
| 2247 | * fault will trigger a flush_notify before pointing to a new page
| 2248 | * (it is fine if the secondary mmu keeps pointing to the old zero
| 2249 | * page in the meantime)
| 2250 | * 3) Split a huge pmd into ptes pointing to the same page. No need
| 2251 | * to invalidate the secondary tlb entries as they are all still valid.
| 2252 | * Any further changes to individual ptes will notify. So no need
| 2253 | * to call mmu_notifier->invalidate_range()
| 2254 | */ |
| 2255 | mmu_notifier_invalidate_range_only_end(mm, haddr, haddr + |
| 2256 | HPAGE_PMD_SIZE); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2257 | } |
| 2258 | |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2259 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, |
| 2260 | bool freeze, struct page *page) |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2261 | { |
Hugh Dickins | f72e7dc | 2014-06-23 13:22:05 -0700 | [diff] [blame] | 2262 | pgd_t *pgd; |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 2263 | p4d_t *p4d; |
Hugh Dickins | f72e7dc | 2014-06-23 13:22:05 -0700 | [diff] [blame] | 2264 | pud_t *pud; |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2265 | pmd_t *pmd; |
| 2266 | |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 2267 | pgd = pgd_offset(vma->vm_mm, address); |
Hugh Dickins | f72e7dc | 2014-06-23 13:22:05 -0700 | [diff] [blame] | 2268 | if (!pgd_present(*pgd)) |
| 2269 | return; |
| 2270 | |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 2271 | p4d = p4d_offset(pgd, address); |
| 2272 | if (!p4d_present(*p4d)) |
| 2273 | return; |
| 2274 | |
| 2275 | pud = pud_offset(p4d, address); |
Hugh Dickins | f72e7dc | 2014-06-23 13:22:05 -0700 | [diff] [blame] | 2276 | if (!pud_present(*pud)) |
| 2277 | return; |
| 2278 | |
| 2279 | pmd = pmd_offset(pud, address); |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2280 | |
Naoya Horiguchi | 33f4751 | 2016-07-14 12:07:32 -0700 | [diff] [blame] | 2281 | __split_huge_pmd(vma, pmd, address, freeze, page); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2282 | } |
| 2283 | |
Kirill A. Shutemov | e1b9996 | 2015-09-08 14:58:37 -0700 | [diff] [blame] | 2284 | void vma_adjust_trans_huge(struct vm_area_struct *vma, |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2285 | unsigned long start, |
| 2286 | unsigned long end, |
| 2287 | long adjust_next) |
| 2288 | { |
| 2289 | /* |
| 2290 | * If the new start address isn't hpage aligned and it could |
| 2291 | * previously contain a hugepage: check if we need to split
| 2292 | * a huge pmd.
| 2293 | */ |
| 2294 | if (start & ~HPAGE_PMD_MASK && |
| 2295 | (start & HPAGE_PMD_MASK) >= vma->vm_start && |
| 2296 | (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2297 | split_huge_pmd_address(vma, start, false, NULL); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2298 | |
| 2299 | /* |
| 2300 | * If the new end address isn't hpage aligned and it could |
| 2301 | * previously contain a hugepage: check if we need to split
| 2302 | * a huge pmd.
| 2303 | */ |
| 2304 | if (end & ~HPAGE_PMD_MASK && |
| 2305 | (end & HPAGE_PMD_MASK) >= vma->vm_start && |
| 2306 | (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2307 | split_huge_pmd_address(vma, end, false, NULL); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2308 | |
| 2309 | /* |
| 2310 | * If we're also updating vma->vm_next->vm_start, and the new
| 2311 | * vm_next->vm_start isn't page aligned and it could previously
| 2312 | * contain a hugepage: check if we need to split a huge pmd.
| 2313 | */ |
| 2314 | if (adjust_next > 0) { |
| 2315 | struct vm_area_struct *next = vma->vm_next; |
| 2316 | unsigned long nstart = next->vm_start; |
| 2317 | nstart += adjust_next << PAGE_SHIFT; |
| 2318 | if (nstart & ~HPAGE_PMD_MASK && |
| 2319 | (nstart & HPAGE_PMD_MASK) >= next->vm_start && |
| 2320 | (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2321 | split_huge_pmd_address(next, nstart, false, NULL); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2322 | } |
| 2323 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2324 | |
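| | /*
| | * Unmap the huge page before the split; for anon pages the mappings are
| | * replaced with migration entries ("frozen"), which unfreeze_page() later
| | * restores via remove_migration_ptes().
| | */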
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2325 | static void freeze_page(struct page *page) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2326 | { |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2327 | enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | |
Kirill A. Shutemov | c7ab0d2 | 2017-02-24 14:58:01 -0800 | [diff] [blame] | 2328 | TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; |
Minchan Kim | 666e5a4 | 2017-05-03 14:54:20 -0700 | [diff] [blame] | 2329 | bool unmap_success; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2330 | |
| 2331 | VM_BUG_ON_PAGE(!PageHead(page), page); |
| 2332 | |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2333 | if (PageAnon(page)) |
Naoya Horiguchi | b5ff816 | 2017-09-08 16:10:49 -0700 | [diff] [blame] | 2334 | ttu_flags |= TTU_SPLIT_FREEZE; |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2335 | |
Minchan Kim | 666e5a4 | 2017-05-03 14:54:20 -0700 | [diff] [blame] | 2336 | unmap_success = try_to_unmap(page, ttu_flags); |
| 2337 | VM_BUG_ON_PAGE(!unmap_success, page); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2338 | } |
| 2339 | |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2340 | static void unfreeze_page(struct page *page) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2341 | { |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2342 | int i; |
Kirill A. Shutemov | ace71a1 | 2017-02-24 14:57:45 -0800 | [diff] [blame] | 2343 | if (PageTransHuge(page)) { |
| 2344 | remove_migration_ptes(page, page, true); |
| 2345 | } else { |
| 2346 | for (i = 0; i < HPAGE_PMD_NR; i++) |
| 2347 | remove_migration_ptes(page + i, page + i, true); |
| 2348 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2349 | } |
| 2350 | |
Kirill A. Shutemov | 8df651c | 2016-03-15 14:57:30 -0700 | [diff] [blame] | 2351 | static void __split_huge_page_tail(struct page *head, int tail, |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2352 | struct lruvec *lruvec, struct list_head *list) |
| 2353 | { |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2354 | struct page *page_tail = head + tail; |
| 2355 | |
Kirill A. Shutemov | 8df651c | 2016-03-15 14:57:30 -0700 | [diff] [blame] | 2356 | VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2357 | |
| 2358 | /* |
Konstantin Khlebnikov | 605ca5e | 2018-04-05 16:23:28 -0700 | [diff] [blame] | 2359 | * Clone page flags before unfreezing refcount. |
| 2360 | * |
| 2361 | * A flags change might follow a successful get_page_unless_zero(),
| 2362 | * for example lock_page() which sets PG_waiters.
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2363 | */ |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2364 | page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; |
| 2365 | page_tail->flags |= (head->flags & |
| 2366 | ((1L << PG_referenced) | |
| 2367 | (1L << PG_swapbacked) | |
Huang Ying | 38d8b4e | 2017-07-06 15:37:18 -0700 | [diff] [blame] | 2368 | (1L << PG_swapcache) | |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2369 | (1L << PG_mlocked) | |
| 2370 | (1L << PG_uptodate) | |
| 2371 | (1L << PG_active) | |
| 2372 | (1L << PG_locked) | |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2373 | (1L << PG_unevictable) | |
| 2374 | (1L << PG_dirty))); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2375 | |
Konstantin Khlebnikov | 605ca5e | 2018-04-05 16:23:28 -0700 | [diff] [blame] | 2376 | /* Page flags must be visible before we make the page non-compound. */ |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2377 | smp_wmb(); |
| 2378 | |
Konstantin Khlebnikov | 605ca5e | 2018-04-05 16:23:28 -0700 | [diff] [blame] | 2379 | /* |
| 2380 | * Clear PageTail before unfreezing page refcount. |
| 2381 | * |
| 2382 | * A put_page() might follow a successful get_page_unless_zero(),
| 2383 | * and it needs a correct compound_head().
| 2384 | */ |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2385 | clear_compound_head(page_tail); |
| 2386 | |
Konstantin Khlebnikov | 605ca5e | 2018-04-05 16:23:28 -0700 | [diff] [blame] | 2387 | /* Finally unfreeze refcount. Additional reference from page cache. */ |
| 2388 | page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || |
| 2389 | PageSwapCache(head))); |
| 2390 | |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2391 | if (page_is_young(head)) |
| 2392 | set_page_young(page_tail); |
| 2393 | if (page_is_idle(head)) |
| 2394 | set_page_idle(page_tail); |
| 2395 | |
| 2396 | /* ->mapping in first tail page is compound_mapcount */ |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2397 | VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2398 | page_tail); |
| 2399 | page_tail->mapping = head->mapping; |
| 2400 | |
| 2401 | page_tail->index = head->index + tail; |
| 2402 | page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); |
Michal Hocko | 94723aa | 2018-04-10 16:30:07 -0700 | [diff] [blame] | 2403 | |
| 2404 | /* |
| 2405 | * Always add to the tail because some iterators expect new
| 2406 | * pages to show up after the currently processed elements - e.g.
| 2407 | * migrate_pages().
| 2408 | */ |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2409 | lru_add_page_tail(head, page_tail, lruvec, list); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2410 | } |
| 2411 | |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2412 | static void __split_huge_page(struct page *page, struct list_head *list, |
| 2413 | unsigned long flags) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2414 | { |
| 2415 | struct page *head = compound_head(page); |
| 2416 | struct zone *zone = page_zone(head); |
| 2417 | struct lruvec *lruvec; |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2418 | pgoff_t end = -1; |
Kirill A. Shutemov | 8df651c | 2016-03-15 14:57:30 -0700 | [diff] [blame] | 2419 | int i; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2420 | |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 2421 | lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2422 | |
| 2423 | /* complete memcg works before add pages to LRU */ |
| 2424 | mem_cgroup_split_huge_fixup(head); |
| 2425 | |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2426 | if (!PageAnon(page)) |
| 2427 | end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE); |
| 2428 | |
| 2429 | for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { |
Kirill A. Shutemov | 8df651c | 2016-03-15 14:57:30 -0700 | [diff] [blame] | 2430 | __split_huge_page_tail(head, i, lruvec, list); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2431 | /* Some pages can be beyond i_size: drop them from page cache */ |
| 2432 | if (head[i].index >= end) { |
Hugh Dickins | 2d077d4 | 2018-06-01 16:50:45 -0700 | [diff] [blame] | 2433 | ClearPageDirty(head + i); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2434 | __delete_from_page_cache(head + i, NULL); |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 2435 | if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) |
| 2436 | shmem_uncharge(head->mapping->host, 1); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2437 | put_page(head + i); |
| 2438 | } |
| 2439 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2440 | |
| 2441 | ClearPageCompound(head); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2442 | /* See comment in __split_huge_page_tail() */ |
| 2443 | if (PageAnon(head)) { |
Huang Ying | 38d8b4e | 2017-07-06 15:37:18 -0700 | [diff] [blame] | 2444 | /* Additional pin to radix tree of swap cache */ |
| 2445 | if (PageSwapCache(head)) |
| 2446 | page_ref_add(head, 2); |
| 2447 | else |
| 2448 | page_ref_inc(head); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2449 | } else { |
| 2450 | /* Additional pin to radix tree */ |
| 2451 | page_ref_add(head, 2); |
Matthew Wilcox | b93b016 | 2018-04-10 16:36:56 -0700 | [diff] [blame] | 2452 | xa_unlock(&head->mapping->i_pages); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2453 | } |
| 2454 | |
Mel Gorman | a52633d | 2016-07-28 15:45:28 -0700 | [diff] [blame] | 2455 | spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2456 | |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2457 | unfreeze_page(head); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2458 | |
| 2459 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
| 2460 | struct page *subpage = head + i; |
| 2461 | if (subpage == page) |
| 2462 | continue; |
| 2463 | unlock_page(subpage); |
| 2464 | |
| 2465 | /* |
| 2466 | * Subpages may be freed if there wasn't any mapping,
| 2467 | * e.g. if add_to_swap() is running on an lru page that
| 2468 | * had its mapping zapped. Freeing these pages
| 2469 | * requires taking the lru_lock, so we do the put_page
| 2470 | * of the tail pages after the split is complete.
| 2471 | */ |
| 2472 | put_page(subpage); |
| 2473 | } |
| 2474 | } |
| 2475 | |
Kirill A. Shutemov | b20ce5e | 2016-01-15 16:54:37 -0800 | [diff] [blame] | 2476 | int total_mapcount(struct page *page) |
| 2477 | { |
Kirill A. Shutemov | dd78fed | 2016-07-26 15:25:26 -0700 | [diff] [blame] | 2478 | int i, compound, ret; |
Kirill A. Shutemov | b20ce5e | 2016-01-15 16:54:37 -0800 | [diff] [blame] | 2479 | |
| 2480 | VM_BUG_ON_PAGE(PageTail(page), page); |
| 2481 | |
| 2482 | if (likely(!PageCompound(page))) |
| 2483 | return atomic_read(&page->_mapcount) + 1; |
| 2484 | |
Kirill A. Shutemov | dd78fed | 2016-07-26 15:25:26 -0700 | [diff] [blame] | 2485 | compound = compound_mapcount(page); |
Kirill A. Shutemov | b20ce5e | 2016-01-15 16:54:37 -0800 | [diff] [blame] | 2486 | if (PageHuge(page)) |
Kirill A. Shutemov | dd78fed | 2016-07-26 15:25:26 -0700 | [diff] [blame] | 2487 | return compound; |
| 2488 | ret = compound; |
Kirill A. Shutemov | b20ce5e | 2016-01-15 16:54:37 -0800 | [diff] [blame] | 2489 | for (i = 0; i < HPAGE_PMD_NR; i++) |
| 2490 | ret += atomic_read(&page[i]._mapcount) + 1; |
Kirill A. Shutemov | dd78fed | 2016-07-26 15:25:26 -0700 | [diff] [blame] | 2491 | /* File pages have compound_mapcount included in _mapcount */
| 2492 | if (!PageAnon(page)) |
| 2493 | return ret - compound * HPAGE_PMD_NR; |
Kirill A. Shutemov | b20ce5e | 2016-01-15 16:54:37 -0800 | [diff] [blame] | 2494 | if (PageDoubleMap(page)) |
| 2495 | ret -= HPAGE_PMD_NR; |
| 2496 | return ret; |
| 2497 | } |
| 2498 | |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2499 | /* |
Andrea Arcangeli | 6d0a07e | 2016-05-12 15:42:25 -0700 | [diff] [blame] | 2500 | * This calculates accurately how many mappings a transparent hugepage |
| 2501 | * has (unlike page_mapcount() which isn't fully accurate). This full |
| 2502 | * accuracy is primarily needed to know if copy-on-write faults can |
| 2503 | * reuse the page and change the mapping to read-write instead of |
| 2504 | * copying them. At the same time this returns the total_mapcount too. |
| 2505 | * |
| 2506 | * The function returns the highest mapcount any one of the subpages |
| 2507 | * has. If the return value is one, even if different processes are |
| 2508 | * mapping different subpages of the transparent hugepage, they can |
| 2509 | * all reuse it, because each process is reusing a different subpage. |
| 2510 | * |
| 2511 | * The total_mapcount is instead counting all virtual mappings of the |
| 2512 | * subpages. If the total_mapcount is equal to "one", it tells the |
| 2513 | * caller all mappings belong to the same "mm" and in turn the |
| 2514 | * anon_vma of the transparent hugepage can become the vma->anon_vma |
| 2515 | * local one as no other process may be mapping any of the subpages. |
| 2516 | * |
| 2517 | * It would be more accurate to replace page_mapcount() with |
| 2518 | * page_trans_huge_mapcount(), however we only use |
| 2519 | * page_trans_huge_mapcount() in the copy-on-write faults where we |
| 2520 | * need full accuracy to avoid breaking page pinning, because |
| 2521 | * page_trans_huge_mapcount() is slower than page_mapcount(). |
| 2522 | */ |
| 2523 | int page_trans_huge_mapcount(struct page *page, int *total_mapcount) |
| 2524 | { |
| 2525 | int i, ret, _total_mapcount, mapcount; |
| 2526 | |
| 2527 | /* hugetlbfs shouldn't call it */ |
| 2528 | VM_BUG_ON_PAGE(PageHuge(page), page); |
| 2529 | |
| 2530 | if (likely(!PageTransCompound(page))) { |
| 2531 | mapcount = atomic_read(&page->_mapcount) + 1; |
| 2532 | if (total_mapcount) |
| 2533 | *total_mapcount = mapcount; |
| 2534 | return mapcount; |
| 2535 | } |
| 2536 | |
| 2537 | page = compound_head(page); |
| 2538 | |
| 2539 | _total_mapcount = ret = 0; |
| 2540 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
| 2541 | mapcount = atomic_read(&page[i]._mapcount) + 1; |
| 2542 | ret = max(ret, mapcount); |
| 2543 | _total_mapcount += mapcount; |
| 2544 | } |
| 2545 | if (PageDoubleMap(page)) { |
| 2546 | ret -= 1; |
| 2547 | _total_mapcount -= HPAGE_PMD_NR; |
| 2548 | } |
| 2549 | mapcount = compound_mapcount(page); |
| 2550 | ret += mapcount; |
| 2551 | _total_mapcount += mapcount; |
| 2552 | if (total_mapcount) |
| 2553 | *total_mapcount = _total_mapcount; |
| 2554 | return ret; |
| 2555 | } |
| 2556 | |
Huang Ying | b8f593c | 2017-07-06 15:37:28 -0700 | [diff] [blame] | 2557 | /* Racy check whether the huge page can be split */ |
| 2558 | bool can_split_huge_page(struct page *page, int *pextra_pins) |
| 2559 | { |
| 2560 | int extra_pins; |
| 2561 | |
| 2562 | /* Additional pins from radix tree */ |
| 2563 | if (PageAnon(page)) |
| 2564 | extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0; |
| 2565 | else |
| 2566 | extra_pins = HPAGE_PMD_NR; |
| 2567 | if (pextra_pins) |
| 2568 | *pextra_pins = extra_pins; |
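| | /*
| | * The page can only be split if the caller's pin and the pins counted
| | * in extra_pins are the only references beyond the mappings.
| | */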
| 2569 | return total_mapcount(page) == page_count(page) - extra_pins - 1; |
| 2570 | } |
| 2571 | |
Andrea Arcangeli | 6d0a07e | 2016-05-12 15:42:25 -0700 | [diff] [blame] | 2572 | /* |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2573 | * This function splits huge page into normal pages. @page can point to any |
| 2574 | * subpage of huge page to split. Split doesn't change the position of @page. |
| 2575 | * |
| 2576 | * The caller must hold the only pin on @page, otherwise the split fails with -EBUSY.
| 2577 | * The huge page must be locked. |
| 2578 | * |
| 2579 | * If @list is null, tail pages will be added to LRU list, otherwise, to @list. |
| 2580 | * |
| 2581 | * Both head page and tail pages will inherit mapping, flags, and so on from |
| 2582 | * the hugepage. |
| 2583 | * |
| 2584 | * The GUP pin and PG_locked are transferred to @page. The rest of the subpages
| 2585 | * can be freed if they are not mapped.
| 2586 | * |
| 2587 | * Returns 0 if the hugepage is split successfully. |
| 2588 | * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under |
| 2589 | * us. |
| 2590 | */ |
| 2591 | int split_huge_page_to_list(struct page *page, struct list_head *list) |
| 2592 | { |
| 2593 | struct page *head = compound_head(page); |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2594 | struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2595 | struct anon_vma *anon_vma = NULL; |
| 2596 | struct address_space *mapping = NULL; |
| 2597 | int count, mapcount, extra_pins, ret; |
Kirill A. Shutemov | d965432 | 2016-01-15 16:54:43 -0800 | [diff] [blame] | 2598 | bool mlocked; |
Kirill A. Shutemov | 0b9b6ff | 2016-01-20 14:58:09 -0800 | [diff] [blame] | 2599 | unsigned long flags; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2600 | |
| 2601 | VM_BUG_ON_PAGE(is_huge_zero_page(page), page); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2602 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2603 | VM_BUG_ON_PAGE(!PageCompound(page), page); |
| 2604 | |
Huang Ying | 5980768 | 2017-09-06 16:22:34 -0700 | [diff] [blame] | 2605 | if (PageWriteback(page)) |
| 2606 | return -EBUSY; |
| 2607 | |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2608 | if (PageAnon(head)) { |
| 2609 | /* |
| 2610 | * The caller does not necessarily hold an mmap_sem that would |
| 2611 | * prevent the anon_vma from disappearing, so we first take a
| 2612 | * reference to it and then lock the anon_vma for write. This
| 2613 | * is similar to page_lock_anon_vma_read except the write lock |
| 2614 | * is taken to serialise against parallel split or collapse |
| 2615 | * operations. |
| 2616 | */ |
| 2617 | anon_vma = page_get_anon_vma(head); |
| 2618 | if (!anon_vma) { |
| 2619 | ret = -EBUSY; |
| 2620 | goto out; |
| 2621 | } |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2622 | mapping = NULL; |
| 2623 | anon_vma_lock_write(anon_vma); |
| 2624 | } else { |
| 2625 | mapping = head->mapping; |
| 2626 | |
| 2627 | /* Truncated ? */ |
| 2628 | if (!mapping) { |
| 2629 | ret = -EBUSY; |
| 2630 | goto out; |
| 2631 | } |
| 2632 | |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2633 | anon_vma = NULL; |
| 2634 | i_mmap_lock_read(mapping); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2635 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2636 | |
| 2637 | /* |
| 2638 | * Racy check if we can split the page, before freeze_page()
| 2639 | * splits the PMDs
| 2640 | */ |
Huang Ying | b8f593c | 2017-07-06 15:37:28 -0700 | [diff] [blame] | 2641 | if (!can_split_huge_page(head, &extra_pins)) { |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2642 | ret = -EBUSY; |
| 2643 | goto out_unlock; |
| 2644 | } |
| 2645 | |
Kirill A. Shutemov | d965432 | 2016-01-15 16:54:43 -0800 | [diff] [blame] | 2646 | mlocked = PageMlocked(page); |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2647 | freeze_page(head); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2648 | VM_BUG_ON_PAGE(compound_mapcount(head), head); |
| 2649 | |
Kirill A. Shutemov | d965432 | 2016-01-15 16:54:43 -0800 | [diff] [blame] | 2650 | /* Make sure the page is not on per-CPU pagevec as it takes pin */ |
| 2651 | if (mlocked) |
| 2652 | lru_add_drain(); |
| 2653 | |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2654 | /* prevent PageLRU from going away under us, and freeze the LRU stats */
Mel Gorman | a52633d | 2016-07-28 15:45:28 -0700 | [diff] [blame] | 2655 | spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2656 | |
| 2657 | if (mapping) { |
| 2658 | void **pslot; |
| 2659 | |
Matthew Wilcox | b93b016 | 2018-04-10 16:36:56 -0700 | [diff] [blame] | 2660 | xa_lock(&mapping->i_pages); |
| 2661 | pslot = radix_tree_lookup_slot(&mapping->i_pages, |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2662 | page_index(head)); |
| 2663 | /* |
| 2664 | * Check that the head page is present in the radix tree.
| 2665 | * We assume all tail pages are present too, if the head is there.
| 2666 | */ |
| 2667 | if (radix_tree_deref_slot_protected(pslot, |
Matthew Wilcox | b93b016 | 2018-04-10 16:36:56 -0700 | [diff] [blame] | 2668 | &mapping->i_pages.xa_lock) != head) |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2669 | goto fail; |
| 2670 | } |
| 2671 | |
Joonsoo Kim | 0139aa7 | 2016-05-19 17:10:49 -0700 | [diff] [blame] | 2672 | /* Prevent deferred_split_scan() touching ->_refcount */ |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2673 | spin_lock(&pgdata->split_queue_lock); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2674 | count = page_count(head); |
| 2675 | mapcount = total_mapcount(head); |
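| | /*
| | * Splitting is only safe once no mappings remain and the refcount can
| | * be frozen at the expected value: our reference plus the extra pins
| | * held by the page cache and/or swap cache.
| | */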
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2676 | if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2677 | if (!list_empty(page_deferred_list(head))) { |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2678 | pgdata->split_queue_len--; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2679 | list_del(page_deferred_list(head)); |
| 2680 | } |
Kirill A. Shutemov | 65c4537 | 2016-07-26 15:26:10 -0700 | [diff] [blame] | 2681 | if (mapping) |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 2682 | __dec_node_page_state(page, NR_SHMEM_THPS); |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2683 | spin_unlock(&pgdata->split_queue_lock); |
| 2684 | __split_huge_page(page, list, flags); |
Huang Ying | 5980768 | 2017-09-06 16:22:34 -0700 | [diff] [blame] | 2685 | if (PageSwapCache(head)) { |
| 2686 | swp_entry_t entry = { .val = page_private(head) }; |
| 2687 | |
| 2688 | ret = split_swap_cluster(entry); |
| 2689 | } else |
| 2690 | ret = 0; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2691 | } else { |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2692 | if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { |
| 2693 | pr_alert("total_mapcount: %u, page_count(): %u\n", |
| 2694 | mapcount, count); |
| 2695 | if (PageTail(page)) |
| 2696 | dump_page(head, NULL); |
| 2697 | dump_page(page, "total_mapcount(head) > 0"); |
| 2698 | BUG(); |
| 2699 | } |
| 2700 | spin_unlock(&pgdata->split_queue_lock); |
| 2701 | fail: if (mapping) |
Matthew Wilcox | b93b016 | 2018-04-10 16:36:56 -0700 | [diff] [blame] | 2702 | xa_unlock(&mapping->i_pages); |
Mel Gorman | a52633d | 2016-07-28 15:45:28 -0700 | [diff] [blame] | 2703 | spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); |
Kirill A. Shutemov | fec89c1 | 2016-03-17 14:20:10 -0700 | [diff] [blame] | 2704 | unfreeze_page(head); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2705 | ret = -EBUSY; |
| 2706 | } |
| 2707 | |
| 2708 | out_unlock: |
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2709 | if (anon_vma) { |
| 2710 | anon_vma_unlock_write(anon_vma); |
| 2711 | put_anon_vma(anon_vma); |
| 2712 | } |
| 2713 | if (mapping) |
| 2714 | i_mmap_unlock_read(mapping); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2715 | out: |
| 2716 | count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); |
| 2717 | return ret; |
| 2718 | } |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2719 | |
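| | /*
| | * Compound page destructor for THP: take the page off the deferred
| | * split queue, if it is still queued, before freeing it.
| | */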
| 2720 | void free_transhuge_page(struct page *page) |
| 2721 | { |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2722 | struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2723 | unsigned long flags; |
| 2724 | |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2725 | spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2726 | if (!list_empty(page_deferred_list(page))) { |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2727 | pgdata->split_queue_len--; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2728 | list_del(page_deferred_list(page)); |
| 2729 | } |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2730 | spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2731 | free_compound_page(page); |
| 2732 | } |
| 2733 | |
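| | /*
| | * Queue a THP (typically one that has become partially unmapped) so the
| | * deferred-split shrinker can split it under memory pressure.
| | */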
| 2734 | void deferred_split_huge_page(struct page *page) |
| 2735 | { |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2736 | struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2737 | unsigned long flags; |
| 2738 | |
| 2739 | VM_BUG_ON_PAGE(!PageTransHuge(page), page); |
| 2740 | |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2741 | spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2742 | if (list_empty(page_deferred_list(page))) { |
Kirill A. Shutemov | f9719a0 | 2016-03-17 14:18:45 -0700 | [diff] [blame] | 2743 | count_vm_event(THP_DEFERRED_SPLIT_PAGE); |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2744 | list_add_tail(page_deferred_list(page), &pgdata->split_queue); |
| 2745 | pgdata->split_queue_len++; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2746 | } |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2747 | spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2748 | } |
| 2749 | |
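| | /* Shrinker ->count_objects: how many THPs are queued for deferred split on this node */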
| 2750 | static unsigned long deferred_split_count(struct shrinker *shrink, |
| 2751 | struct shrink_control *sc) |
| 2752 | { |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2753 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 2754 | return READ_ONCE(pgdata->split_queue_len); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2755 | } |
| 2756 | |
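| | /*
| | * Shrinker ->scan_objects: pin the queued pages under the queue lock,
| | * then try to lock and split each one outside of it.
| | */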
| 2757 | static unsigned long deferred_split_scan(struct shrinker *shrink, |
| 2758 | struct shrink_control *sc) |
| 2759 | { |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2760 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2761 | unsigned long flags; |
| 2762 | LIST_HEAD(list), *pos, *next; |
| 2763 | struct page *page; |
| 2764 | int split = 0; |
| 2765 | |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2766 | spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2767 | /* Take a pin on all head pages to avoid them being freed under us */
Kirill A. Shutemov | ae02620 | 2016-02-05 15:36:53 -0800 | [diff] [blame] | 2768 | list_for_each_safe(pos, next, &pgdata->split_queue) { |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2769 | page = list_entry((void *)pos, struct page, mapping); |
| 2770 | page = compound_head(page); |
Kirill A. Shutemov | e3ae195 | 2016-02-02 16:57:15 -0800 | [diff] [blame] | 2771 | if (get_page_unless_zero(page)) { |
| 2772 | list_move(page_deferred_list(page), &list); |
| 2773 | } else { |
| 2774 | /* We lost the race with put_compound_page() */
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2775 | list_del_init(page_deferred_list(page)); |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2776 | pgdata->split_queue_len--; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2777 | } |
Kirill A. Shutemov | e3ae195 | 2016-02-02 16:57:15 -0800 | [diff] [blame] | 2778 | if (!--sc->nr_to_scan) |
| 2779 | break; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2780 | } |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2781 | spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2782 | |
| 2783 | list_for_each_safe(pos, next, &list) { |
| 2784 | page = list_entry((void *)pos, struct page, mapping); |
Kirill A. Shutemov | fa41b90 | 2018-03-22 16:17:31 -0700 | [diff] [blame] | 2785 | if (!trylock_page(page)) |
| 2786 | goto next; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2787 | /* split_huge_page() removes the page from the list on success */
| 2788 | if (!split_huge_page(page)) |
| 2789 | split++; |
| 2790 | unlock_page(page); |
Kirill A. Shutemov | fa41b90 | 2018-03-22 16:17:31 -0700 | [diff] [blame] | 2791 | next: |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2792 | put_page(page); |
| 2793 | } |
| 2794 | |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2795 | spin_lock_irqsave(&pgdata->split_queue_lock, flags); |
| 2796 | list_splice_tail(&list, &pgdata->split_queue); |
| 2797 | spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2798 | |
Kirill A. Shutemov | cb8d68e | 2016-02-02 16:57:12 -0800 | [diff] [blame] | 2799 | /* |
| 2800 | * Stop the shrinker if we didn't split any page but the queue is empty.
| 2801 | * This can happen if the pages were freed under us.
| 2802 | */ |
| 2803 | if (!split && list_empty(&pgdata->split_queue)) |
| 2804 | return SHRINK_STOP; |
| 2805 | return split; |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2806 | } |
| 2807 | |
| 2808 | static struct shrinker deferred_split_shrinker = { |
| 2809 | .count_objects = deferred_split_count, |
| 2810 | .scan_objects = deferred_split_scan, |
| 2811 | .seeks = DEFAULT_SEEKS, |
Kirill A. Shutemov | a3d0a918 | 2016-02-02 16:57:08 -0800 | [diff] [blame] | 2812 | .flags = SHRINKER_NUMA_AWARE, |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2813 | }; |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 2814 | |
| 2815 | #ifdef CONFIG_DEBUG_FS |
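| | /*
| | * Writing 1 to the split_huge_pages debugfs file attempts to split every
| | * splittable THP in the system; this is a debugging/testing aid only.
| | */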
| 2816 | static int split_huge_pages_set(void *data, u64 val) |
| 2817 | { |
| 2818 | struct zone *zone; |
| 2819 | struct page *page; |
| 2820 | unsigned long pfn, max_zone_pfn; |
| 2821 | unsigned long total = 0, split = 0; |
| 2822 | |
| 2823 | if (val != 1) |
| 2824 | return -EINVAL; |
| 2825 | |
| 2826 | for_each_populated_zone(zone) { |
| 2827 | max_zone_pfn = zone_end_pfn(zone); |
| 2828 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { |
| 2829 | if (!pfn_valid(pfn)) |
| 2830 | continue; |
| 2831 | |
| 2832 | page = pfn_to_page(pfn); |
| 2833 | if (!get_page_unless_zero(page)) |
| 2834 | continue; |
| 2835 | |
| 2836 | if (zone != page_zone(page)) |
| 2837 | goto next; |
| 2838 | |
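| | /* Only consider THP head pages on the LRU; skip hugetlb pages */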
Kirill A. Shutemov | baa355f | 2016-07-26 15:25:51 -0700 | [diff] [blame] | 2839 | if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 2840 | goto next; |
| 2841 | |
| 2842 | total++; |
| 2843 | lock_page(page); |
| 2844 | if (!split_huge_page(page)) |
| 2845 | split++; |
| 2846 | unlock_page(page); |
| 2847 | next: |
| 2848 | put_page(page); |
| 2849 | } |
| 2850 | } |
| 2851 | |
Yang Shi | 145bdaa | 2016-05-05 16:22:00 -0700 | [diff] [blame] | 2852 | pr_info("%lu of %lu THP split\n", split, total); |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 2853 | |
| 2854 | return 0; |
| 2855 | } |
| 2856 | DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, |
| 2857 | "%llu\n"); |
| 2858 | |
| 2859 | static int __init split_huge_pages_debugfs(void) |
| 2860 | { |
| 2861 | void *ret; |
| 2862 | |
Yang Shi | 145bdaa | 2016-05-05 16:22:00 -0700 | [diff] [blame] | 2863 | ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL, |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 2864 | &split_huge_pages_fops); |
| 2865 | if (!ret) |
| 2866 | pr_warn("Failed to create split_huge_pages in debugfs\n");
| 2867 | return 0; |
| 2868 | } |
| 2869 | late_initcall(split_huge_pages_debugfs); |
| 2870 | #endif |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2871 | |
| 2872 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
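| | /*
| | * Replace the present PMD mapping of @page with a PMD migration entry,
| | * preserving the dirty and soft-dirty state.
| | */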
| 2873 | void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, |
| 2874 | struct page *page) |
| 2875 | { |
| 2876 | struct vm_area_struct *vma = pvmw->vma; |
| 2877 | struct mm_struct *mm = vma->vm_mm; |
| 2878 | unsigned long address = pvmw->address; |
| 2879 | pmd_t pmdval; |
| 2880 | swp_entry_t entry; |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 2881 | pmd_t pmdswp; |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2882 | |
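| | /* Nothing to do unless the walk found a PMD-mapped THP (pmd set, pte clear) */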
| 2883 | if (!(pvmw->pmd && !pvmw->pte)) |
| 2884 | return; |
| 2885 | |
| 2886 | mmu_notifier_invalidate_range_start(mm, address, |
| 2887 | address + HPAGE_PMD_SIZE); |
| 2888 | |
| 2889 | flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); |
| 2890 | pmdval = *pvmw->pmd; |
| 2891 | pmdp_invalidate(vma, address, pvmw->pmd); |
| 2892 | if (pmd_dirty(pmdval)) |
| 2893 | set_page_dirty(page); |
| 2894 | entry = make_migration_entry(page, pmd_write(pmdval)); |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 2895 | pmdswp = swp_entry_to_pmd(entry); |
| 2896 | if (pmd_soft_dirty(pmdval)) |
| 2897 | pmdswp = pmd_swp_mksoft_dirty(pmdswp); |
| 2898 | set_pmd_at(mm, address, pvmw->pmd, pmdswp); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2899 | page_remove_rmap(page, true); |
| 2900 | put_page(page); |
| 2901 | |
| 2902 | mmu_notifier_invalidate_range_end(mm, address, |
| 2903 | address + HPAGE_PMD_SIZE); |
| 2904 | } |
| 2905 | |
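| | /*
| | * Re-establish the PMD mapping for the new page once migration has
| | * completed, restoring the write and soft-dirty bits from the entry.
| | */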
| 2906 | void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) |
| 2907 | { |
| 2908 | struct vm_area_struct *vma = pvmw->vma; |
| 2909 | struct mm_struct *mm = vma->vm_mm; |
| 2910 | unsigned long address = pvmw->address; |
| 2911 | unsigned long mmun_start = address & HPAGE_PMD_MASK; |
| 2912 | pmd_t pmde; |
| 2913 | swp_entry_t entry; |
| 2914 | |
| 2915 | if (!(pvmw->pmd && !pvmw->pte)) |
| 2916 | return; |
| 2917 | |
| 2918 | entry = pmd_to_swp_entry(*pvmw->pmd); |
| 2919 | get_page(new); |
| 2920 | pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)); |
Naoya Horiguchi | ab6e3d0 | 2017-09-08 16:11:04 -0700 | [diff] [blame] | 2921 | if (pmd_swp_soft_dirty(*pvmw->pmd)) |
| 2922 | pmde = pmd_mksoft_dirty(pmde); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2923 | if (is_write_migration_entry(entry)) |
Linus Torvalds | f55e101 | 2017-11-29 09:01:01 -0800 | [diff] [blame] | 2924 | pmde = maybe_pmd_mkwrite(pmde, vma); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2925 | |
| 2926 | flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); |
Naoya Horiguchi | e71769a | 2018-04-20 14:55:45 -0700 | [diff] [blame] | 2927 | if (PageAnon(new)) |
| 2928 | page_add_anon_rmap(new, vma, mmun_start, true); |
| 2929 | else |
| 2930 | page_add_file_rmap(new, true); |
Zi Yan | 616b837 | 2017-09-08 16:10:57 -0700 | [diff] [blame] | 2931 | set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); |
| 2932 | if (vma->vm_flags & VM_LOCKED) |
| 2933 | mlock_vma_page(new); |
| 2934 | update_mmu_cache_pmd(vma, address, pvmw->pmd); |
| 2935 | } |
| 2936 | #endif |