// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>
#include <asm/inst.h>

struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static u32 *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (u32 *)((unsigned long)fcur + offset);
}

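/*
 * Worked example for calc_addr(): with a fixup_entry at address 0x1000
 * and start_off == -0x100, the section to patch starts at 0xf00.
 * Storing self-relative offsets rather than absolute addresses is what
 * lets the same scheme work for the vDSO, which runs mapped at a
 * different address than it is linked at.
 */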
static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end)
{
	int err;
	ppc_inst_t instr;

	instr = ppc_inst_read(src);

	if (instr_is_relative_branch(ppc_inst_read(src))) {
		u32 *target = (u32 *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			err = translate_branch(&instr, dest, src);
			if (err)
				return 1;
		}
	}

	raw_patch_instruction(dest, instr);

	return 0;
}
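
/*
 * A note on patch_alt_instruction()'s branch handling: a relative branch
 * copied verbatim from the alternative section would keep its old
 * displacement and land in the wrong place. For example, a "bl helper"
 * in the alt section targeting code outside alt_start..alt_end must be
 * re-encoded by translate_branch() so it still reaches the helper from
 * dest; a branch whose target lies within the alt section is copied
 * unchanged, since the whole block moves together.
 */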

static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	u32 *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src = ppc_inst_next(src, src),
			      dest = ppc_inst_next(dest, dest)) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	for (; dest < end; dest++)
		raw_patch_instruction(dest, ppc_inst(PPC_RAW_NOP()));

	return 0;
}

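/*
 * The net effect of patch_feature_section() when the feature check
 * fails is, schematically:
 *
 *	original section	after patching
 *	----------------	--------------
 *	insn A			alt insn X
 *	insn B			alt insn Y
 *	insn C			nop
 *
 * The alternative sequence (with relative branches re-targeted)
 * replaces the start of the section and the remainder is filled with
 * nops. If no alternative is supplied the whole section is nopped out.
 */
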
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

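/*
 * A typical caller is apply_feature_fixups() below, run early in boot:
 *
 *	do_feature_fixups(spec->cpu_features,
 *			  PTRRELOC(&__start___ftr_fixup),
 *			  PTRRELOC(&__stop___ftr_fixup));
 *
 * The vDSO uses the same machinery on its own fixup sections, which is
 * why the entries store relative rather than absolute addresses.
 */
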
#ifdef CONFIG_PPC_BOOK3S_64
static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_entry_barrier_fixup);
	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();

	i = 0;
	if (types & STF_BARRIER_FALLBACK) {
		instrs[i++] = PPC_RAW_MFLR(_R10);
		instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
		instrs[i++] = PPC_RAW_MTLR(_R10);
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
	} else if (types & STF_BARRIER_SYNC_ORI) {
		instrs[i++] = PPC_RAW_SYNC();
		instrs[i++] = PPC_RAW_LD(_R10, _R13, 0);
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
	}

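	/*
	 * Each word in the fixup section is a self-relative offset to a
	 * patch site, so "(void *)start + *start" below recovers the
	 * absolute address of the instructions to patch. The same decode
	 * is used by all the fixup loops in this file.
	 */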
	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		// See comment in do_entry_flush_fixups() RE order of patching
		if (types & STF_BARRIER_FALLBACK) {
			patch_instruction(dest, ppc_inst(instrs[0]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_branch(dest + 1,
				     (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
		} else {
			patch_instruction(dest + 1, ppc_inst(instrs[1]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_instruction(dest, ppc_inst(instrs[0]));
		}
	}

	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)        ? "no" :
		(types == STF_BARRIER_FALLBACK)    ? "fallback" :
		(types == STF_BARRIER_EIEIO)       ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))  ? "hwsync"
						   : "unknown");
}

static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[6], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_exit_barrier_fixup);
	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();
	instrs[3] = PPC_RAW_NOP();
	instrs[4] = PPC_RAW_NOP();
	instrs[5] = PPC_RAW_NOP();

	i = 0;
	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = PPC_RAW_MTSPR(SPRN_HSPRG1, _R13);
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG0);
		} else {
			instrs[i++] = PPC_RAW_MTSPR(SPRN_SPRG2, _R13);
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG1);
		}
		instrs[i++] = PPC_RAW_SYNC();
		instrs[i++] = PPC_RAW_LD(_R13, _R13, 0);
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		if (cpu_has_feature(CPU_FTR_HVMODE))
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG1);
		else
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG2);
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, ppc_inst(instrs[0]));
		patch_instruction(dest + 1, ppc_inst(instrs[1]));
		patch_instruction(dest + 2, ppc_inst(instrs[2]));
		patch_instruction(dest + 3, ppc_inst(instrs[3]));
		patch_instruction(dest + 4, ppc_inst(instrs[4]));
		patch_instruction(dest + 5, ppc_inst(instrs[5]));
	}
	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)        ? "no" :
		(types == STF_BARRIER_FALLBACK)    ? "fallback" :
		(types == STF_BARRIER_EIEIO)       ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))  ? "hwsync"
						   : "unknown");
}

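/*
 * These flags track whether the stf and rfi interrupt-exit paths are
 * currently safe to enter re-entrantly. The interrupt_exit_not_reentrant
 * static branch is only flipped back off once both paths are re-entrant,
 * and exit_flush_lock serialises updates from the two fixup routines.
 */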
static bool stf_exit_reentrant = false;
static bool rfi_exit_reentrant = false;
static DEFINE_MUTEX(exit_flush_lock);

static int __do_stf_barrier_fixups(void *data)
{
	enum stf_barrier_type *types = data;

	do_stf_entry_barrier_fixups(*types);
	do_stf_exit_barrier_fixups(*types);

	return 0;
}

void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	/*
	 * The call to the fallback entry flush, and the fallback/sync-ori exit
	 * flush can not be safely patched in/out while other CPUs are
	 * executing them. So call __do_stf_barrier_fixups() on one CPU while
	 * all other CPUs spin in the stop machine core with interrupts hard
	 * disabled.
	 *
	 * The branch to mark interrupt exits non-reentrant is enabled first,
	 * then stop_machine runs which will ensure all CPUs are out of the
	 * low level interrupt exit code before patching. After the patching,
	 * if allowed, then flip the branch to allow fast exits.
	 */

	// Prevent static key update races with do_rfi_flush_fixups()
	mutex_lock(&exit_flush_lock);
	static_branch_enable(&interrupt_exit_not_reentrant);

	stop_machine(__do_stf_barrier_fixups, &types, NULL);

	if ((types & STF_BARRIER_FALLBACK) || (types & STF_BARRIER_SYNC_ORI))
		stf_exit_reentrant = false;
	else
		stf_exit_reentrant = true;

	if (stf_exit_reentrant && rfi_exit_reentrant)
		static_branch_disable(&interrupt_exit_not_reentrant);

	mutex_unlock(&exit_flush_lock);
}

void do_uaccess_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[4], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___uaccess_flush_fixup);
	end = PTRRELOC(&__stop___uaccess_flush_fixup);

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();
	instrs[3] = PPC_RAW_BLR();

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[3] = PPC_RAW_NOP();
		/* fallthrough to fallback flush */
	}
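	/*
	 * instrs[3] starts out as a blr so the patched site returns as soon
	 * as the in-line flush sequence has run; for the fallback flush it
	 * was turned into a nop above so execution instead falls through
	 * into the fallback flush code.
	 */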

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, ppc_inst(instrs[0]));

		patch_instruction(dest + 1, ppc_inst(instrs[1]));
		patch_instruction(dest + 2, ppc_inst(instrs[2]));
		patch_instruction(dest + 3, ppc_inst(instrs[3]));
	}

	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

static int __do_entry_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[i++] = PPC_RAW_MFLR(_R10);
		instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
		instrs[i++] = PPC_RAW_MTLR(_R10);
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);

	/*
	 * If we're patching in or out the fallback flush we need to be careful about the
	 * order in which we patch instructions. That's because it's possible we could
	 * take a page fault after patching one instruction, so the sequence of
	 * instructions must be safe even in a half patched state.
	 *
	 * To make that work, when patching in the fallback flush we patch in this order:
	 * - the mflr		(dest)
	 * - the mtlr		(dest + 2)
	 * - the branch		(dest + 1)
	 *
	 * That ensures the sequence is safe to execute at any point. In contrast if we
	 * patch the mtlr last, it's possible we could return from the branch and not
	 * restore LR, leading to a crash later.
	 *
	 * When patching out the fallback flush (either with nops or another flush type),
	 * we patch in this order:
	 * - the branch		(dest + 1)
	 * - the mtlr		(dest + 2)
	 * - the mflr		(dest)
	 *
	 * Note we are protected by stop_machine() from other CPUs executing the code in a
	 * semi-patched state.
	 */

	start = PTRRELOC(&__start___entry_flush_fixup);
	end = PTRRELOC(&__stop___entry_flush_fixup);
	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		if (types == L1D_FLUSH_FALLBACK) {
			patch_instruction(dest, ppc_inst(instrs[0]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_branch(dest + 1,
				     (unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
		} else {
			patch_instruction(dest + 1, ppc_inst(instrs[1]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_instruction(dest, ppc_inst(instrs[0]));
		}
	}

	start = PTRRELOC(&__start___scv_entry_flush_fixup);
	end = PTRRELOC(&__stop___scv_entry_flush_fixup);
	for (; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		if (types == L1D_FLUSH_FALLBACK) {
			patch_instruction(dest, ppc_inst(instrs[0]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_branch(dest + 1,
				     (unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
		} else {
			patch_instruction(dest + 1, ppc_inst(instrs[1]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_instruction(dest, ppc_inst(instrs[0]));
		}
	}

	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");

	return 0;
}

void do_entry_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * The call to the fallback flush can not be safely patched in/out while
	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
	 * CPU while all other CPUs spin in the stop machine core with interrupts
	 * hard disabled.
	 */
	stop_machine(__do_entry_flush_fixups, &types, NULL);
}

static int __do_rfi_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___rfi_flush_fixup);
	end = PTRRELOC(&__stop___rfi_flush_fixup);

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();

	if (types & L1D_FLUSH_FALLBACK)
		/* b .+16 to fallback flush */
		instrs[0] = PPC_INST_BRANCH | 16;

	i = 0;
	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, ppc_inst(instrs[0]));
		patch_instruction(dest + 1, ppc_inst(instrs[1]));
		patch_instruction(dest + 2, ppc_inst(instrs[2]));
	}

	printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");

	return 0;
}

void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * stop_machine gets all CPUs out of the interrupt exit handler same
	 * as do_stf_barrier_fixups. do_rfi_flush_fixups patching can run
	 * without stop_machine, so this could be achieved with a broadcast
	 * IPI instead, but this matches the stf sequence.
	 */

	// Prevent static key update races with do_stf_barrier_fixups()
	mutex_lock(&exit_flush_lock);
	static_branch_enable(&interrupt_exit_not_reentrant);

	stop_machine(__do_rfi_flush_fixups, &types, NULL);

	if (types & L1D_FLUSH_FALLBACK)
		rfi_exit_reentrant = false;
	else
		rfi_exit_reentrant = true;

	if (stf_exit_reentrant && rfi_exit_reentrant)
		static_branch_disable(&interrupt_exit_not_reentrant);

	mutex_unlock(&exit_flush_lock);
}

void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr, *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr = PPC_RAW_NOP();

	if (enable) {
		pr_info("barrier-nospec: using ORI speculation barrier\n");
		instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, ppc_inst(instr));
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups(bool enable)
{
	void *start, *end;

	start = PTRRELOC(&__start___barrier_nospec_fixup);
	end = PTRRELOC(&__stop___barrier_nospec_fixup);

	do_barrier_nospec_fixups_range(enable, start, end);
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2], *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_RAW_NOP();
	instr[1] = PPC_RAW_NOP();

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_RAW_ISYNC();
		instr[1] = PPC_RAW_SYNC();
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, ppc_inst(instr[0]));
		patch_instruction(dest + 1, ppc_inst(instr[1]));
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

static void __init patch_btb_flush_section(long *curr)
{
	unsigned int *start, *end;

	start = (void *)curr + *curr;
	end = (void *)curr + *(curr + 1);
	for (; start < end; start++) {
		pr_devel("patching dest %lx\n", (unsigned long)start);
		patch_instruction(start, ppc_inst(PPC_RAW_NOP()));
	}
}

void __init do_btb_flush_fixups(void)
{
	long *start, *end;

	start = PTRRELOC(&__start__btb_flush_fixup);
	end = PTRRELOC(&__stop__btb_flush_fixup);

	for (; start < end; start += 2)
		patch_btb_flush_section(start);
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	u32 *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC));
	}
}

static void __init do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	ppc_inst_t inst;
	u32 *src, *dest, *end;

	if (PHYSICAL_START == 0)
		return;

	src = (u32 *)(KERNELBASE + PHYSICAL_START);
	dest = (u32 *)KERNELBASE;
	end = (void *)src + (__end_interrupts - _stext);

	while (src < end) {
		inst = ppc_inst_read(src);
		raw_patch_instruction(dest, inst);
		src = ppc_inst_next(src, src);
		dest = ppc_inst_next(dest, dest);
	}
#endif
}
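
/*
 * do_final_fixups() covers relocatable 64-bit kernels running at a
 * non-zero physical address: the low-memory copy of the text from
 * _stext up to __end_interrupts must match the (already feature-patched)
 * code the kernel is actually running, so it is copied from the running
 * kernel down to the copy at KERNELBASE.
 */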

static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif

void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}

void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}

static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);

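/*
 * The selftests below exercise the patching code on specially crafted
 * instruction sequences: the ftr_fixup_test* and related symbols are
 * expected to be provided by an assembly companion file, each paired
 * with a hand-written "expected" sequence to memcmp() against after
 * patching.
 */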
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 710 | #ifdef CONFIG_FTR_FIXUP_SELFTEST |
| 711 | |
| 712 | #define check(x) \ |
| 713 | if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__); |
| 714 | |
| 715 | /* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */ |
| 716 | static struct fixup_entry fixup; |
| 717 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 718 | static long __init calc_offset(struct fixup_entry *entry, unsigned int *p) |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 719 | { |
| 720 | return (unsigned long)p - (unsigned long)entry; |
| 721 | } |
| 722 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 723 | static void __init test_basic_patching(void) |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 724 | { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 725 | extern unsigned int ftr_fixup_test1[]; |
| 726 | extern unsigned int end_ftr_fixup_test1[]; |
| 727 | extern unsigned int ftr_fixup_test1_orig[]; |
| 728 | extern unsigned int ftr_fixup_test1_expected[]; |
Michael Ellerman | cad0e39 | 2018-04-17 00:39:03 +1000 | [diff] [blame] | 729 | int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 730 | |
| 731 | fixup.value = fixup.mask = 8; |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 732 | fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1); |
| 733 | fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 734 | fixup.alt_start_off = fixup.alt_end_off = 0; |
| 735 | |
| 736 | /* Sanity check */ |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 737 | check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 738 | |
| 739 | /* Check we don't patch if the value matches */ |
| 740 | patch_feature_section(8, &fixup); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 741 | check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 742 | |
| 743 | /* Check we do patch if the value doesn't match */ |
| 744 | patch_feature_section(0, &fixup); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 745 | check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 746 | |
| 747 | /* Check we do patch if the mask doesn't match */ |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 748 | memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size); |
| 749 | check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 750 | patch_feature_section(~8, &fixup); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 751 | check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 752 | } |
| 753 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 754 | static void __init test_alternative_patching(void) |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 755 | { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 756 | extern unsigned int ftr_fixup_test2[]; |
| 757 | extern unsigned int end_ftr_fixup_test2[]; |
| 758 | extern unsigned int ftr_fixup_test2_orig[]; |
| 759 | extern unsigned int ftr_fixup_test2_alt[]; |
| 760 | extern unsigned int ftr_fixup_test2_expected[]; |
Michael Ellerman | cad0e39 | 2018-04-17 00:39:03 +1000 | [diff] [blame] | 761 | int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 762 | |
| 763 | fixup.value = fixup.mask = 0xF; |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 764 | fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1); |
| 765 | fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2); |
| 766 | fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt); |
| 767 | fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 768 | |
| 769 | /* Sanity check */ |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 770 | check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 771 | |
| 772 | /* Check we don't patch if the value matches */ |
| 773 | patch_feature_section(0xF, &fixup); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 774 | check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 775 | |
| 776 | /* Check we do patch if the value doesn't match */ |
| 777 | patch_feature_section(0, &fixup); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 778 | check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 779 | |
| 780 | /* Check we do patch if the mask doesn't match */ |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 781 | memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size); |
| 782 | check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 783 | patch_feature_section(~0xF, &fixup); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 784 | check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 785 | } |
| 786 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 787 | static void __init test_alternative_case_too_big(void) |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 788 | { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 789 | extern unsigned int ftr_fixup_test3[]; |
| 790 | extern unsigned int end_ftr_fixup_test3[]; |
| 791 | extern unsigned int ftr_fixup_test3_orig[]; |
| 792 | extern unsigned int ftr_fixup_test3_alt[]; |
Michael Ellerman | cad0e39 | 2018-04-17 00:39:03 +1000 | [diff] [blame] | 793 | int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 794 | |
| 795 | fixup.value = fixup.mask = 0xC; |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 796 | fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1); |
| 797 | fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2); |
| 798 | fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt); |
| 799 | fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 800 | |
| 801 | /* Sanity check */ |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 802 | check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 803 | |
| 804 | /* Expect nothing to be patched, and the error returned to us */ |
| 805 | check(patch_feature_section(0xF, &fixup) == 1); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 806 | check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 807 | check(patch_feature_section(0, &fixup) == 1); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 808 | check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 809 | check(patch_feature_section(~0xF, &fixup) == 1); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 810 | check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 811 | } |
| 812 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 813 | static void __init test_alternative_case_too_small(void) |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 814 | { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 815 | extern unsigned int ftr_fixup_test4[]; |
| 816 | extern unsigned int end_ftr_fixup_test4[]; |
| 817 | extern unsigned int ftr_fixup_test4_orig[]; |
| 818 | extern unsigned int ftr_fixup_test4_alt[]; |
| 819 | extern unsigned int ftr_fixup_test4_expected[]; |
Michael Ellerman | cad0e39 | 2018-04-17 00:39:03 +1000 | [diff] [blame] | 820 | int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 821 | unsigned long flag; |
| 822 | |
| 823 | /* Check a high-bit flag */ |
| 824 | flag = 1UL << ((sizeof(unsigned long) - 1) * 8); |
| 825 | fixup.value = fixup.mask = flag; |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 826 | fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1); |
| 827 | fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5); |
| 828 | fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt); |
| 829 | fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 830 | |
| 831 | /* Sanity check */ |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 832 | check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 833 | |
| 834 | /* Check we don't patch if the value matches */ |
| 835 | patch_feature_section(flag, &fixup); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 836 | check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 837 | |
| 838 | /* Check we do patch if the value doesn't match */ |
| 839 | patch_feature_section(0, &fixup); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 840 | check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 841 | |
| 842 | /* Check we do patch if the mask doesn't match */ |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 843 | memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size); |
| 844 | check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 845 | patch_feature_section(~flag, &fixup); |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 846 | check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 847 | } |
| 848 | |
| 849 | static void test_alternative_case_with_branch(void) |
| 850 | { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 851 | extern unsigned int ftr_fixup_test5[]; |
| 852 | extern unsigned int end_ftr_fixup_test5[]; |
| 853 | extern unsigned int ftr_fixup_test5_expected[]; |
Michael Ellerman | cad0e39 | 2018-04-17 00:39:03 +1000 | [diff] [blame] | 854 | int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 855 | |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 856 | check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 857 | } |
| 858 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 859 | static void __init test_alternative_case_with_external_branch(void) |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 860 | { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 861 | extern unsigned int ftr_fixup_test6[]; |
| 862 | extern unsigned int end_ftr_fixup_test6[]; |
| 863 | extern unsigned int ftr_fixup_test6_expected[]; |
Michael Ellerman | cad0e39 | 2018-04-17 00:39:03 +1000 | [diff] [blame] | 864 | int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 865 | |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 866 | check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 867 | } |
| 868 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 869 | static void __init test_alternative_case_with_branch_to_end(void) |
Michael Ellerman | 6158fae | 2018-04-17 00:39:05 +1000 | [diff] [blame] | 870 | { |
| 871 | extern unsigned int ftr_fixup_test7[]; |
| 872 | extern unsigned int end_ftr_fixup_test7[]; |
| 873 | extern unsigned int ftr_fixup_test7_expected[]; |
| 874 | int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7); |
| 875 | |
| 876 | check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0); |
| 877 | } |
| 878 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 879 | static void __init test_cpu_macros(void) |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 880 | { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 881 | extern u8 ftr_fixup_test_FTR_macros[]; |
| 882 | extern u8 ftr_fixup_test_FTR_macros_expected[]; |
| 883 | unsigned long size = ftr_fixup_test_FTR_macros_expected - |
| 884 | ftr_fixup_test_FTR_macros; |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 885 | |
| 886 | /* The fixups have already been done for us during boot */ |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 887 | check(memcmp(ftr_fixup_test_FTR_macros, |
| 888 | ftr_fixup_test_FTR_macros_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 889 | } |
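|  | 
|  | /* |
|  |  * No fixup entry is built by hand here: do_feature_fixups() already |
|  |  * walked the __ftr_fixup table at boot, roughly (warnings elided): |
|  |  * |
|  |  *	struct fixup_entry *fcur; |
|  |  * |
|  |  *	for (fcur = fixup_start; fcur < fixup_end; fcur++) |
|  |  *		patch_feature_section(value, fcur); |
|  |  * |
|  |  * with value taken from cur_cpu_spec->cpu_features, so the test only |
|  |  * compares the post-boot image against the expected bytes. |
|  |  */ |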
| 890 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 891 | static void __init test_fw_macros(void) |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 892 | { |
| 893 | #ifdef CONFIG_PPC64 |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 894 | extern u8 ftr_fixup_test_FW_FTR_macros[]; |
| 895 | extern u8 ftr_fixup_test_FW_FTR_macros_expected[]; |
| 896 | unsigned long size = ftr_fixup_test_FW_FTR_macros_expected - |
| 897 | ftr_fixup_test_FW_FTR_macros; |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 898 | |
| 899 | /* The fixups have already been done for us during boot */ |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 900 | check(memcmp(ftr_fixup_test_FW_FTR_macros, |
| 901 | ftr_fixup_test_FW_FTR_macros_expected, size) == 0); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 902 | #endif |
| 903 | } |
| 904 | |
Nick Child | ce0c6be | 2021-12-16 17:00:17 -0500 | [diff] [blame] | 905 | static void __init test_lwsync_macros(void) |
Kumar Gala | 2d1b202 | 2008-07-02 01:16:40 +1000 | [diff] [blame] | 906 | { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 907 | extern u8 lwsync_fixup_test[]; |
| 908 | extern u8 end_lwsync_fixup_test[]; |
| 909 | extern u8 lwsync_fixup_test_expected_LWSYNC[]; |
| 910 | extern u8 lwsync_fixup_test_expected_SYNC[]; |
| 911 | unsigned long size = end_lwsync_fixup_test - |
| 912 | lwsync_fixup_test; |
Kumar Gala | 2d1b202 | 2008-07-02 01:16:40 +1000 | [diff] [blame] | 913 | |
| 914 | /* The fixups have already been done for us during boot */ |
| 915 | if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 916 | check(memcmp(lwsync_fixup_test, |
| 917 | lwsync_fixup_test_expected_LWSYNC, size) == 0); |
Kumar Gala | 2d1b202 | 2008-07-02 01:16:40 +1000 | [diff] [blame] | 918 | } else { |
Daniel Axtens | c69a48c | 2017-07-12 14:36:07 -0700 | [diff] [blame] | 919 | check(memcmp(lwsync_fixup_test, |
| 920 | lwsync_fixup_test_expected_SYNC, size) == 0); |
Kumar Gala | 2d1b202 | 2008-07-02 01:16:40 +1000 | [diff] [blame] | 921 | } |
| 922 | } |
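|  | 
|  | /* |
|  |  * lwsync sites are assembled as full sync and relaxed at boot when the |
|  |  * CPU allows it. A condensed sketch of do_lwsync_fixups() (the table |
|  |  * entries are self-relative offsets; declarations and error handling |
|  |  * elided): |
|  |  * |
|  |  *	if (!(value & CPU_FTR_LWSYNC)) |
|  |  *		return;		// keep the heavyweight sync |
|  |  * |
|  |  *	for (start = fixup_start; start < fixup_end; start++) { |
|  |  *		dest = (void *)start + *start; |
|  |  *		raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC)); |
|  |  *	} |
|  |  */ |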
| 923 | |
Jordan Niethe | 785b79d | 2020-05-06 13:40:45 +1000 | [diff] [blame] | 924 | #ifdef CONFIG_PPC64 |
| 925 | static void __init test_prefix_patching(void) |
| 926 | { |
| 927 | extern unsigned int ftr_fixup_prefix1[]; |
| 928 | extern unsigned int end_ftr_fixup_prefix1[]; |
| 929 | extern unsigned int ftr_fixup_prefix1_orig[]; |
| 930 | extern unsigned int ftr_fixup_prefix1_expected[]; |
| 931 | int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1); |
| 932 | |
| 933 | fixup.value = fixup.mask = 8; |
| 934 | fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1); |
| 935 | fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3); |
| 936 | fixup.alt_start_off = fixup.alt_end_off = 0; |
| 937 | |
| 938 | /* Sanity check */ |
| 939 | check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0); |
| 940 | |
| 941 | patch_feature_section(0, &fixup); |
| 942 | check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0); |
| 943 | check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0); |
| 944 | } |
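|  | 
|  | /* |
|  |  * Prefixed (8-byte) instructions are why the patch loop cannot assume |
|  |  * a fixed 4-byte stride. Roughly, patch_feature_section() steps by |
|  |  * instruction length and then nop-fills word by word: |
|  |  * |
|  |  *	for (; src < alt_end; src = ppc_inst_next(src, src), |
|  |  *			      dest = ppc_inst_next(dest, dest)) |
|  |  *		patch_alt_instruction(src, dest, alt_start, alt_end); |
|  |  * |
|  |  *	for (; dest < end; dest++) |
|  |  *		raw_patch_instruction(dest, ppc_inst(PPC_RAW_NOP())); |
|  |  */ |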
| 945 | |
| 946 | static void __init test_prefix_alt_patching(void) |
| 947 | { |
| 948 | extern unsigned int ftr_fixup_prefix2[]; |
| 949 | extern unsigned int end_ftr_fixup_prefix2[]; |
| 950 | extern unsigned int ftr_fixup_prefix2_orig[]; |
| 951 | extern unsigned int ftr_fixup_prefix2_expected[]; |
| 952 | extern unsigned int ftr_fixup_prefix2_alt[]; |
| 953 | int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2); |
| 954 | |
| 955 | fixup.value = fixup.mask = 8; |
| 956 | fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1); |
| 957 | fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3); |
| 958 | fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt); |
| 959 | fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2); |
| 960 | /* Sanity check */ |
| 961 | check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) == 0); |
| 962 | |
| 963 | patch_feature_section(0, &fixup); |
| 964 | check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0); |
| 965 | check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0); |
| 966 | } |
| 967 | |
| 968 | static void __init test_prefix_word_alt_patching(void) |
| 969 | { |
| 970 | extern unsigned int ftr_fixup_prefix3[]; |
| 971 | extern unsigned int end_ftr_fixup_prefix3[]; |
| 972 | extern unsigned int ftr_fixup_prefix3_orig[]; |
| 973 | extern unsigned int ftr_fixup_prefix3_expected[]; |
| 974 | extern unsigned int ftr_fixup_prefix3_alt[]; |
| 975 | int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3); |
| 976 | |
| 977 | fixup.value = fixup.mask = 8; |
| 978 | fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1); |
| 979 | fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4); |
| 980 | fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt); |
| 981 | fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3); |
| 982 | /* Sanity check */ |
| 983 | check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0); |
| 984 | |
| 985 | patch_feature_section(0, &fixup); |
| 986 | check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0); |
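|  | /* Patch a second time; the section must still differ from the original */ |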
| 987 | patch_feature_section(0, &fixup); |
| 988 | check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0); |
| 989 | } |
| 990 | #else |
| 991 | static inline void test_prefix_patching(void) {} |
| 992 | static inline void test_prefix_alt_patching(void) {} |
| 993 | static inline void test_prefix_word_alt_patching(void) {} |
| 994 | #endif /* CONFIG_PPC64 */ |
| 995 | |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 996 | static int __init test_feature_fixups(void) |
| 997 | { |
| 998 | printk(KERN_DEBUG "Running feature fixup self-tests ...\n"); |
| 999 | |
| 1000 | test_basic_patching(); |
| 1001 | test_alternative_patching(); |
| 1002 | test_alternative_case_too_big(); |
| 1003 | test_alternative_case_too_small(); |
| 1004 | test_alternative_case_with_branch(); |
| 1005 | test_alternative_case_with_external_branch(); |
Michael Ellerman | 6158fae | 2018-04-17 00:39:05 +1000 | [diff] [blame] | 1006 | test_alternative_case_with_branch_to_end(); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 1007 | test_cpu_macros(); |
| 1008 | test_fw_macros(); |
Kumar Gala | 2d1b202 | 2008-07-02 01:16:40 +1000 | [diff] [blame] | 1009 | test_lwsync_macros(); |
Jordan Niethe | 785b79d | 2020-05-06 13:40:45 +1000 | [diff] [blame] | 1010 | test_prefix_patching(); |
| 1011 | test_prefix_alt_patching(); |
| 1012 | test_prefix_word_alt_patching(); |
Michael Ellerman | 362e770 | 2008-06-24 11:33:03 +1000 | [diff] [blame] | 1013 | |
| 1014 | return 0; |
| 1015 | } |
| 1016 | late_initcall(test_feature_fixups); |
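|  | 
|  | /* |
|  |  * With CONFIG_FTR_FIXUP_SELFTEST=y the suite runs once late in boot; |
|  |  * any failure is reported through check()'s printk along with the |
|  |  * offending line number, so a log with no "test failed" lines means |
|  |  * every fixup variant patched as expected. |
|  |  */ |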
| 1017 | |
| 1018 | #endif /* CONFIG_FTR_FIXUP_SELFTEST */ |