// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2018 Intel Corporation. All rights reserved.

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/random.h>
#include <linux/moduleparam.h>
#include "internal.h"
#include "shuffle.h"

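/*
 * page_alloc_shuffle_key gates the shuffle implementation at runtime at
 * (near) zero cost when disabled. shuffle_state tracks the
 * SHUFFLE_ENABLE / SHUFFLE_FORCE_DISABLE control bits that decide
 * whether the key may be flipped on.
 */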
DEFINE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
static unsigned long shuffle_state __ro_after_init;

/*
 * Depending on the architecture, module parameter parsing may run
 * before or after cache detection. SHUFFLE_FORCE_DISABLE prevents, or
 * reverts, the enabling of the shuffle implementation. SHUFFLE_ENABLE
 * attempts to turn the implementation on, but aborts if it finds
 * SHUFFLE_FORCE_DISABLE already set.
 */
__meminit void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
{
	if (ctl == SHUFFLE_FORCE_DISABLE)
		set_bit(SHUFFLE_FORCE_DISABLE, &shuffle_state);

	if (test_bit(SHUFFLE_FORCE_DISABLE, &shuffle_state)) {
		if (test_and_clear_bit(SHUFFLE_ENABLE, &shuffle_state))
			static_branch_disable(&page_alloc_shuffle_key);
	} else if (ctl == SHUFFLE_ENABLE
			&& !test_and_set_bit(SHUFFLE_ENABLE, &shuffle_state))
		static_branch_enable(&page_alloc_shuffle_key);
}

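/*
 * The 'shuffle' module parameter reports the current state ('Y'/'N')
 * and, when set at boot time, enables or force-disables shuffling. The
 * 0400 permissions make the sysfs node read-only at runtime; the
 * boot-time knob is documented as page_alloc.shuffle= in
 * Documentation/admin-guide/kernel-parameters.txt.
 */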
static bool shuffle_param;
static int shuffle_show(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%c\n", test_bit(SHUFFLE_ENABLE, &shuffle_state)
			? 'Y' : 'N');
}

static __meminit int shuffle_store(const char *val,
		const struct kernel_param *kp)
{
	int rc = param_set_bool(val, kp);

	if (rc < 0)
		return rc;
	if (shuffle_param)
		page_alloc_shuffle(SHUFFLE_ENABLE);
	else
		page_alloc_shuffle(SHUFFLE_FORCE_DISABLE);
	return 0;
}
module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400);

/*
 * For two pages to be swapped in the shuffle, they must be free (on a
 * 'free_area' lru), have the same order, and have the same migratetype.
 */
static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order)
{
	struct page *page;

	/*
	 * Given we're dealing with randomly selected pfns in a zone we
	 * need to ask questions like...
	 */

	/* ...is the pfn even in the memmap? */
	if (!pfn_valid_within(pfn))
		return NULL;

	/* ...is the pfn in a present section or a hole? */
	if (!pfn_in_present_section(pfn))
		return NULL;

	/* ...is the page free and currently on a free_area list? */
	page = pfn_to_page(pfn);
	if (!PageBuddy(page))
		return NULL;

	/*
	 * ...is the page on the same list as the page we will
	 * shuffle it with?
	 */
	if (page_order(page) != order)
		return NULL;

	return page;
}

/*
 * Fisher-Yates shuffle the freelist: iterate through an array, pfns in
 * this case, and randomly swap each entry with another in the span
 * start_pfn to end_pfn.
 *
 * To keep the implementation simple it does not attempt to correct for
 * sources of bias in the distribution, like modulo bias or
 * pseudo-random number generator bias. That is, the expectation is that
 * this shuffling raises the bar for attacks that exploit the
 * predictability of page allocations, but it need not be a perfect
 * shuffle.
 */
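/* Number of random swap-target picks attempted before skipping an entry */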
#define SHUFFLE_RETRY 10
void __meminit __shuffle_zone(struct zone *z)
{
	unsigned long i, flags;
	unsigned long start_pfn = z->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(z);
	const int order = SHUFFLE_ORDER;
	const int order_pages = 1 << order;

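	/*
	 * Walk the zone in SHUFFLE_ORDER-sized strides under the zone
	 * lock. Aligning start_pfn keeps each candidate pfn on an
	 * order-aligned boundary, as a free buddy of that order must be.
	 */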
	spin_lock_irqsave(&z->lock, flags);
	start_pfn = ALIGN(start_pfn, order_pages);
	for (i = start_pfn; i < end_pfn; i += order_pages) {
		unsigned long j;
		int migratetype, retry;
		struct page *page_i, *page_j;

		/*
		 * We expect page_i, in the sub-range of the zone being
		 * walked (@start_pfn to @end_pfn), to more likely be
		 * valid than page_j, which is randomly selected from
		 * the span @zone_start_pfn to @zone_start_pfn +
		 * @spanned_pages.
		 */
		page_i = shuffle_valid_page(i, order);
		if (!page_i)
			continue;

		for (retry = 0; retry < SHUFFLE_RETRY; retry++) {
			/*
			 * Pick a random order-aligned page in the zone
			 * span as a swap target. If the selected pfn is
			 * a hole, retry up to SHUFFLE_RETRY times to
			 * find a random valid pfn in the zone.
			 */
			j = z->zone_start_pfn +
				ALIGN_DOWN(get_random_long() % z->spanned_pages,
						order_pages);
			page_j = shuffle_valid_page(j, order);
			if (page_j && page_j != page_i)
				break;
		}
		if (retry >= SHUFFLE_RETRY) {
			pr_debug("%s: failed to swap %#lx\n", __func__, i);
			continue;
		}

		/*
		 * Each migratetype corresponds to its own list; make
		 * sure the types match, otherwise we would be moving
		 * pages to lists where they do not belong.
		 */
		migratetype = get_pageblock_migratetype(page_i);
		if (get_pageblock_migratetype(page_j) != migratetype) {
			pr_debug("%s: migratetype mismatch %#lx\n", __func__, i);
			continue;
		}

		list_swap(&page_i->lru, &page_j->lru);

		pr_debug("%s: swap: %#lx -> %#lx\n", __func__, i, j);

		/* take it easy on the zone lock */
		if ((i % (100 * order_pages)) == 0) {
			spin_unlock_irqrestore(&z->lock, flags);
			cond_resched();
			spin_lock_irqsave(&z->lock, flags);
		}
	}
	spin_unlock_irqrestore(&z->lock, flags);
}

/**
 * __shuffle_free_memory - reduce the predictability of the page allocator
 * @pgdat: node page data
 *
 * Reached via the shuffle_free_memory() wrapper in shuffle.h, which
 * checks the shuffle static key first.
 */
void __meminit __shuffle_free_memory(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		shuffle_zone(z);
}

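/*
 * shuffle_pick_tail - return a random boolean to direct freelist placement
 *
 * Used by the page allocator to randomly decide whether a freed page is
 * placed at the head or the tail of its free list. Consumes one bit per
 * call from a cached 64-bit random word to limit calls into the RNG.
 */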
bool shuffle_pick_tail(void)
{
	static u64 rand;
	static u8 rand_bits;
	bool ret;

	/*
	 * The lack of locking is deliberate. If two threads race to
	 * update the rand state it just adds to the entropy.
	 */
	if (rand_bits == 0) {
		rand_bits = 64;
		rand = get_random_u64();
	}

	ret = rand & 1;

	rand_bits--;
	rand >>= 1;

	return ret;
}