blob: 1fd31b4b0e139e36715f49fbd2928f5384d6ed3b [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Michael Ellerman51c52e82008-06-24 11:32:36 +10002/*
3 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
4 *
5 * Modifications for ppc64:
6 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
7 *
8 * Copyright 2008 Michael Ellerman, IBM Corporation.
Michael Ellerman51c52e82008-06-24 11:32:36 +10009 */
10
Stephen Rothwell3880ecb2010-06-28 21:08:29 +000011#include <linux/types.h>
Aneesh Kumar K.V309b3152016-07-23 14:42:38 +053012#include <linux/jump_label.h>
Michael Ellerman51c52e82008-06-24 11:32:36 +100013#include <linux/kernel.h>
Michael Ellerman362e7702008-06-24 11:33:03 +100014#include <linux/string.h>
15#include <linux/init.h>
Ingo Molnar589ee622017-02-04 00:16:44 +010016#include <linux/sched/mm.h>
Michael Ellerman51c52e82008-06-24 11:32:36 +100017#include <asm/cputable.h>
18#include <asm/code-patching.h>
Anton Blanchardd715e432011-11-14 12:54:47 +000019#include <asm/page.h>
20#include <asm/sections.h>
Benjamin Herrenschmidt9402c682016-07-05 15:03:41 +100021#include <asm/setup.h>
Nicholas Piggina048a072018-05-22 09:00:00 +100022#include <asm/security_features.h>
Benjamin Herrenschmidt9402c682016-07-05 15:03:41 +100023#include <asm/firmware.h>
Jordan Niethe75346252020-05-06 13:40:26 +100024#include <asm/inst.h>
Michael Ellerman51c52e82008-06-24 11:32:36 +100025
/*
 * One entry in a feature-fixup table (e.g. __ftr_fixup).  All offsets
 * are stored relative to the entry itself (see calc_addr()), which
 * also makes the tables usable from the VDSO.
 */
struct fixup_entry {
	unsigned long mask;	/* feature bits this entry tests */
	unsigned long value;	/* required (value & mask) to keep the original code */
	long start_off;		/* offset to start of patchable code */
	long end_off;		/* offset to end of patchable code */
	long alt_start_off;	/* offset to start of alternative code */
	long alt_end_off;	/* offset to end of alternative code */
};
34
Jordan Niethe94afd062020-05-06 13:40:31 +100035static struct ppc_inst *calc_addr(struct fixup_entry *fcur, long offset)
Michael Ellerman51c52e82008-06-24 11:32:36 +100036{
Michael Ellerman9b1a7352008-06-24 11:33:02 +100037 /*
38 * We store the offset to the code as a negative offset from
39 * the start of the alt_entry, to support the VDSO. This
40 * routine converts that back into an actual address.
41 */
Jordan Niethe94afd062020-05-06 13:40:31 +100042 return (struct ppc_inst *)((unsigned long)fcur + offset);
Michael Ellerman9b1a7352008-06-24 11:33:02 +100043}
44
Jordan Niethe94afd062020-05-06 13:40:31 +100045static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
46 struct ppc_inst *alt_start, struct ppc_inst *alt_end)
Michael Ellerman9b1a7352008-06-24 11:33:02 +100047{
Jordan Niethe7c95d882020-05-06 13:40:25 +100048 int err;
Jordan Niethe94afd062020-05-06 13:40:31 +100049 struct ppc_inst instr;
Michael Ellerman9b1a7352008-06-24 11:33:02 +100050
Jordan Niethef8faaff2020-05-06 13:40:32 +100051 instr = ppc_inst_read(src);
Michael Ellerman9b1a7352008-06-24 11:33:02 +100052
53 if (instr_is_relative_branch(*src)) {
Jordan Niethe94afd062020-05-06 13:40:31 +100054 struct ppc_inst *target = (struct ppc_inst *)branch_target(src);
Michael Ellerman9b1a7352008-06-24 11:33:02 +100055
56 /* Branch within the section doesn't need translating */
Michael Ellermanb8858582018-04-16 23:25:19 +100057 if (target < alt_start || target > alt_end) {
Jordan Niethe7c95d882020-05-06 13:40:25 +100058 err = translate_branch(&instr, dest, src);
59 if (err)
Michael Ellerman9b1a7352008-06-24 11:33:02 +100060 return 1;
61 }
62 }
63
Christophe Leroy8183d992017-11-24 08:31:09 +010064 raw_patch_instruction(dest, instr);
Michael Ellerman9b1a7352008-06-24 11:33:02 +100065
66 return 0;
67}
68
/*
 * Apply one fixup entry: if (value & mask) does not equal the entry's
 * required value, overwrite the code section with its alternative and
 * nop-pad any remaining space.  Returns 1 on failure, 0 otherwise.
 */
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest, nop;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	/* The alternative code must fit in the space it replaces */
	if ((alt_end - alt_start) > (end - start))
		return 1;

	/* Feature bits match: keep the original code in place */
	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	/* Copy the alternative over the original, fixing up branches */
	for (; src < alt_end; src = ppc_inst_next(src, src),
			      dest = ppc_inst_next(dest, dest)) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	/* Nop out whatever remains of the original section */
	nop = ppc_inst(PPC_INST_NOP);
	for (; dest < end; dest = ppc_inst_next(dest, &nop))
		raw_patch_instruction(dest, nop);

	return 0;
}
99
/*
 * Walk a table of fixup_entry records and apply each one against the
 * given feature-bit value.  Used for the CPU, MMU and firmware feature
 * sections.  Failures are warned about but do not stop the walk.
 */
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}
Michael Ellerman362e7702008-06-24 11:33:03 +1000119
Michael Ellermanaa8a5e02018-01-10 03:07:15 +1100120#ifdef CONFIG_PPC_BOOK3S_64
Breno Leitao3b30c6e2018-10-22 11:54:17 -0300121static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
Nicholas Piggina048a072018-05-22 09:00:00 +1000122{
123 unsigned int instrs[3], *dest;
124 long *start, *end;
125 int i;
126
Daniel Axtens1fc0c272020-12-02 01:43:44 +1100127 start = PTRRELOC(&__start___stf_entry_barrier_fixup);
Nicholas Piggina048a072018-05-22 09:00:00 +1000128 end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
129
130 instrs[0] = 0x60000000; /* nop */
131 instrs[1] = 0x60000000; /* nop */
132 instrs[2] = 0x60000000; /* nop */
133
134 i = 0;
135 if (types & STF_BARRIER_FALLBACK) {
136 instrs[i++] = 0x7d4802a6; /* mflr r10 */
137 instrs[i++] = 0x60000000; /* branch patched below */
138 instrs[i++] = 0x7d4803a6; /* mtlr r10 */
139 } else if (types & STF_BARRIER_EIEIO) {
140 instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
141 } else if (types & STF_BARRIER_SYNC_ORI) {
142 instrs[i++] = 0x7c0004ac; /* hwsync */
143 instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
144 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
145 }
146
147 for (i = 0; start < end; start++, i++) {
148 dest = (void *)start + *start;
149
150 pr_devel("patching dest %lx\n", (unsigned long)dest);
151
Jordan Niethe94afd062020-05-06 13:40:31 +1000152 patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
Nicholas Piggina048a072018-05-22 09:00:00 +1000153
154 if (types & STF_BARRIER_FALLBACK)
Jordan Niethe94afd062020-05-06 13:40:31 +1000155 patch_branch((struct ppc_inst *)(dest + 1),
156 (unsigned long)&stf_barrier_fallback,
Nicholas Piggina048a072018-05-22 09:00:00 +1000157 BRANCH_SET_LINK);
158 else
Jordan Niethe94afd062020-05-06 13:40:31 +1000159 patch_instruction((struct ppc_inst *)(dest + 1),
160 ppc_inst(instrs[1]));
Nicholas Piggina048a072018-05-22 09:00:00 +1000161
Jordan Niethe94afd062020-05-06 13:40:31 +1000162 patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
Nicholas Piggina048a072018-05-22 09:00:00 +1000163 }
164
165 printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
166 (types == STF_BARRIER_NONE) ? "no" :
167 (types == STF_BARRIER_FALLBACK) ? "fallback" :
168 (types == STF_BARRIER_EIEIO) ? "eieio" :
169 (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
170 : "unknown");
171}
172
Breno Leitao3b30c6e2018-10-22 11:54:17 -0300173static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
Nicholas Piggina048a072018-05-22 09:00:00 +1000174{
175 unsigned int instrs[6], *dest;
176 long *start, *end;
177 int i;
178
Daniel Axtens1fc0c272020-12-02 01:43:44 +1100179 start = PTRRELOC(&__start___stf_exit_barrier_fixup);
Nicholas Piggina048a072018-05-22 09:00:00 +1000180 end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
181
182 instrs[0] = 0x60000000; /* nop */
183 instrs[1] = 0x60000000; /* nop */
184 instrs[2] = 0x60000000; /* nop */
185 instrs[3] = 0x60000000; /* nop */
186 instrs[4] = 0x60000000; /* nop */
187 instrs[5] = 0x60000000; /* nop */
188
189 i = 0;
190 if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
191 if (cpu_has_feature(CPU_FTR_HVMODE)) {
192 instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
193 instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
194 } else {
195 instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
196 instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
197 }
198 instrs[i++] = 0x7c0004ac; /* hwsync */
199 instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
200 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
201 if (cpu_has_feature(CPU_FTR_HVMODE)) {
202 instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
203 } else {
204 instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
205 }
206 } else if (types & STF_BARRIER_EIEIO) {
207 instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
208 }
209
210 for (i = 0; start < end; start++, i++) {
211 dest = (void *)start + *start;
212
213 pr_devel("patching dest %lx\n", (unsigned long)dest);
214
Jordan Niethe94afd062020-05-06 13:40:31 +1000215 patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
216 patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
217 patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
218 patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
219 patch_instruction((struct ppc_inst *)(dest + 4), ppc_inst(instrs[4]));
220 patch_instruction((struct ppc_inst *)(dest + 5), ppc_inst(instrs[5]));
Nicholas Piggina048a072018-05-22 09:00:00 +1000221 }
222 printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
223 (types == STF_BARRIER_NONE) ? "no" :
224 (types == STF_BARRIER_FALLBACK) ? "fallback" :
225 (types == STF_BARRIER_EIEIO) ? "eieio" :
226 (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
227 : "unknown");
228}
229
230
/* Patch both the kernel-entry and kernel-exit STF barrier sites. */
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	do_stf_entry_barrier_fixups(types);
	do_stf_exit_barrier_fixups(types);
}
236
Nicholas Piggin9a32a7e2020-11-17 16:59:13 +1100237void do_uaccess_flush_fixups(enum l1d_flush_type types)
238{
239 unsigned int instrs[4], *dest;
240 long *start, *end;
241 int i;
242
243 start = PTRRELOC(&__start___uaccess_flush_fixup);
244 end = PTRRELOC(&__stop___uaccess_flush_fixup);
245
246 instrs[0] = 0x60000000; /* nop */
247 instrs[1] = 0x60000000; /* nop */
248 instrs[2] = 0x60000000; /* nop */
249 instrs[3] = 0x4e800020; /* blr */
250
251 i = 0;
252 if (types == L1D_FLUSH_FALLBACK) {
253 instrs[3] = 0x60000000; /* nop */
254 /* fallthrough to fallback flush */
255 }
256
257 if (types & L1D_FLUSH_ORI) {
258 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
259 instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
260 }
261
262 if (types & L1D_FLUSH_MTTRIG)
263 instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
264
265 for (i = 0; start < end; start++, i++) {
266 dest = (void *)start + *start;
267
268 pr_devel("patching dest %lx\n", (unsigned long)dest);
269
270 patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
271
272 patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
273 patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
274 patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
275 }
276
277 printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
278 (types == L1D_FLUSH_NONE) ? "no" :
279 (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
280 (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
281 ? "ori+mttrig type"
282 : "ori type" :
283 (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
284 : "unknown");
285}
286
Nicholas Pigginf7964372020-11-17 16:59:12 +1100287void do_entry_flush_fixups(enum l1d_flush_type types)
288{
289 unsigned int instrs[3], *dest;
290 long *start, *end;
291 int i;
292
Nicholas Pigginf7964372020-11-17 16:59:12 +1100293 instrs[0] = 0x60000000; /* nop */
294 instrs[1] = 0x60000000; /* nop */
295 instrs[2] = 0x60000000; /* nop */
296
297 i = 0;
298 if (types == L1D_FLUSH_FALLBACK) {
299 instrs[i++] = 0x7d4802a6; /* mflr r10 */
300 instrs[i++] = 0x60000000; /* branch patched below */
301 instrs[i++] = 0x7d4803a6; /* mtlr r10 */
302 }
303
304 if (types & L1D_FLUSH_ORI) {
305 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
306 instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
307 }
308
309 if (types & L1D_FLUSH_MTTRIG)
310 instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
311
Nicholas Piggin08685be2021-01-11 16:24:08 +1000312 start = PTRRELOC(&__start___entry_flush_fixup);
313 end = PTRRELOC(&__stop___entry_flush_fixup);
Nicholas Pigginf7964372020-11-17 16:59:12 +1100314 for (i = 0; start < end; start++, i++) {
315 dest = (void *)start + *start;
316
317 pr_devel("patching dest %lx\n", (unsigned long)dest);
318
319 patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
320
321 if (types == L1D_FLUSH_FALLBACK)
322 patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
323 BRANCH_SET_LINK);
324 else
325 patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
326
327 patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
328 }
329
Nicholas Piggin08685be2021-01-11 16:24:08 +1000330 start = PTRRELOC(&__start___scv_entry_flush_fixup);
331 end = PTRRELOC(&__stop___scv_entry_flush_fixup);
332 for (; start < end; start++, i++) {
333 dest = (void *)start + *start;
334
335 pr_devel("patching dest %lx\n", (unsigned long)dest);
336
337 patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
338
339 if (types == L1D_FLUSH_FALLBACK)
340 patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
341 BRANCH_SET_LINK);
342 else
343 patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
344
345 patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
346 }
347
348
Nicholas Pigginf7964372020-11-17 16:59:12 +1100349 printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
350 (types == L1D_FLUSH_NONE) ? "no" :
351 (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
352 (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
353 ? "ori+mttrig type"
354 : "ori type" :
355 (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
356 : "unknown");
357}
358
Michael Ellermanaa8a5e02018-01-10 03:07:15 +1100359void do_rfi_flush_fixups(enum l1d_flush_type types)
360{
361 unsigned int instrs[3], *dest;
362 long *start, *end;
363 int i;
364
Daniel Axtens1fc0c272020-12-02 01:43:44 +1100365 start = PTRRELOC(&__start___rfi_flush_fixup);
Michael Ellermanaa8a5e02018-01-10 03:07:15 +1100366 end = PTRRELOC(&__stop___rfi_flush_fixup);
367
368 instrs[0] = 0x60000000; /* nop */
369 instrs[1] = 0x60000000; /* nop */
370 instrs[2] = 0x60000000; /* nop */
371
372 if (types & L1D_FLUSH_FALLBACK)
373 /* b .+16 to fallback flush */
374 instrs[0] = 0x48000010;
375
376 i = 0;
377 if (types & L1D_FLUSH_ORI) {
378 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
379 instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
380 }
381
382 if (types & L1D_FLUSH_MTTRIG)
383 instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
384
385 for (i = 0; start < end; start++, i++) {
386 dest = (void *)start + *start;
387
388 pr_devel("patching dest %lx\n", (unsigned long)dest);
389
Jordan Niethe94afd062020-05-06 13:40:31 +1000390 patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
391 patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
392 patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
Michael Ellermanaa8a5e02018-01-10 03:07:15 +1100393 }
394
Mauricio Faria de Oliveira0063d612018-03-14 19:40:41 -0300395 printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
396 (types == L1D_FLUSH_NONE) ? "no" :
397 (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
398 (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
399 ? "ori+mttrig type"
400 : "ori type" :
401 (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
402 : "unknown");
Michael Ellermanaa8a5e02018-01-10 03:07:15 +1100403}
Michal Suchanek2eea7f02018-04-24 14:15:55 +1000404
Michal Suchanek815069c2018-04-24 14:15:56 +1000405void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
Michal Suchanek2eea7f02018-04-24 14:15:55 +1000406{
407 unsigned int instr, *dest;
408 long *start, *end;
409 int i;
410
Michal Suchanek815069c2018-04-24 14:15:56 +1000411 start = fixup_start;
412 end = fixup_end;
Michal Suchanek2eea7f02018-04-24 14:15:55 +1000413
414 instr = 0x60000000; /* nop */
415
416 if (enable) {
417 pr_info("barrier-nospec: using ORI speculation barrier\n");
418 instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
419 }
420
421 for (i = 0; start < end; start++, i++) {
422 dest = (void *)start + *start;
423
424 pr_devel("patching dest %lx\n", (unsigned long)dest);
Jordan Niethe94afd062020-05-06 13:40:31 +1000425 patch_instruction((struct ppc_inst *)dest, ppc_inst(instr));
Michal Suchanek2eea7f02018-04-24 14:15:55 +1000426 }
427
428 printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
429}
430
Michael Ellerman179ab1c2018-07-28 09:06:34 +1000431#endif /* CONFIG_PPC_BOOK3S_64 */
432
433#ifdef CONFIG_PPC_BARRIER_NOSPEC
Michal Suchanek815069c2018-04-24 14:15:56 +1000434void do_barrier_nospec_fixups(bool enable)
435{
436 void *start, *end;
437
Daniel Axtens1fc0c272020-12-02 01:43:44 +1100438 start = PTRRELOC(&__start___barrier_nospec_fixup);
Michal Suchanek815069c2018-04-24 14:15:56 +1000439 end = PTRRELOC(&__stop___barrier_nospec_fixup);
440
441 do_barrier_nospec_fixups_range(enable, start, end);
442}
Michael Ellerman179ab1c2018-07-28 09:06:34 +1000443#endif /* CONFIG_PPC_BARRIER_NOSPEC */
Michael Ellermanaa8a5e02018-01-10 03:07:15 +1100444
Diana Craciunebcd1bf2018-07-28 09:06:37 +1000445#ifdef CONFIG_PPC_FSL_BOOK3E
/*
 * FSL Book3E variant: each barrier_nospec site takes two instructions,
 * "isync; sync" when enabled or two nops when disabled.
 */
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2], *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_INST_NOP;
	instr[1] = PPC_INST_NOP;

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_INST_ISYNC;
		instr[1] = PPC_INST_SYNC;
	}

	for (i = 0; start < end; start++, i++) {
		/* Each table entry is a self-relative offset to the site */
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction((struct ppc_inst *)dest, ppc_inst(instr[0]));
		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instr[1]));
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
Diana Craciun76a5eaa2018-12-12 16:03:00 +0200474
475static void patch_btb_flush_section(long *curr)
476{
477 unsigned int *start, *end;
478
479 start = (void *)curr + *curr;
480 end = (void *)curr + *(curr + 1);
481 for (; start < end; start++) {
482 pr_devel("patching dest %lx\n", (unsigned long)start);
Jordan Niethe94afd062020-05-06 13:40:31 +1000483 patch_instruction((struct ppc_inst *)start, ppc_inst(PPC_INST_NOP));
Diana Craciun76a5eaa2018-12-12 16:03:00 +0200484 }
485}
486
487void do_btb_flush_fixups(void)
488{
489 long *start, *end;
490
491 start = PTRRELOC(&__start__btb_flush_fixup);
492 end = PTRRELOC(&__stop__btb_flush_fixup);
493
494 for (; start < end; start += 2)
495 patch_btb_flush_section(start);
496}
Diana Craciunebcd1bf2018-07-28 09:06:37 +1000497#endif /* CONFIG_PPC_FSL_BOOK3E */
498
Kumar Gala2d1b2022008-07-02 01:16:40 +1000499void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
500{
Benjamin Herrenschmidt3d98ffb2010-02-26 18:29:17 +1100501 long *start, *end;
Jordan Niethe94afd062020-05-06 13:40:31 +1000502 struct ppc_inst *dest;
Kumar Gala2d1b2022008-07-02 01:16:40 +1000503
504 if (!(value & CPU_FTR_LWSYNC))
505 return ;
506
507 start = fixup_start;
508 end = fixup_end;
509
510 for (; start < end; start++) {
511 dest = (void *)start + *start;
Jordan Niethe75346252020-05-06 13:40:26 +1000512 raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC));
Kumar Gala2d1b2022008-07-02 01:16:40 +1000513 }
514}
515
Benjamin Herrenschmidt9402c682016-07-05 15:03:41 +1000516static void do_final_fixups(void)
Anton Blanchardd715e432011-11-14 12:54:47 +0000517{
518#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
Jordan Niethe622cf6f2020-05-06 13:40:37 +1000519 struct ppc_inst inst, *src, *dest, *end;
Anton Blanchardd715e432011-11-14 12:54:47 +0000520
521 if (PHYSICAL_START == 0)
522 return;
523
Jordan Niethe94afd062020-05-06 13:40:31 +1000524 src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START);
525 dest = (struct ppc_inst *)KERNELBASE;
Jordan Niethe622cf6f2020-05-06 13:40:37 +1000526 end = (void *)src + (__end_interrupts - _stext);
Anton Blanchardd715e432011-11-14 12:54:47 +0000527
Jordan Niethe622cf6f2020-05-06 13:40:37 +1000528 while (src < end) {
529 inst = ppc_inst_read(src);
530 raw_patch_instruction(dest, inst);
Michael Ellermanc5ff46d2020-05-22 23:33:18 +1000531 src = ppc_inst_next(src, src);
532 dest = ppc_inst_next(dest, dest);
Anton Blanchardd715e432011-11-14 12:54:47 +0000533 }
534#endif
535}
536
Michael Ellermana28e46f2016-07-26 22:29:18 +1000537static unsigned long __initdata saved_cpu_features;
538static unsigned int __initdata saved_mmu_features;
539#ifdef CONFIG_PPC64
540static unsigned long __initdata saved_firmware_features;
541#endif
542
/*
 * Apply all boot-time code patching driven by the detected CPU, MMU
 * and (on 64-bit) firmware feature bits.  The feature words used are
 * snapshotted so check_features() can later verify they did not change
 * after patching.
 */
void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	/* Snapshot for the late_initcall sanity check */
	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}
Aneesh Kumar K.V309b3152016-07-23 14:42:38 +0530573
Benjamin Herrenschmidt97f6e0c2016-08-10 17:27:34 +1000574void __init setup_feature_keys(void)
575{
Aneesh Kumar K.V309b3152016-07-23 14:42:38 +0530576 /*
577 * Initialise jump label. This causes all the cpu/mmu_has_feature()
578 * checks to take on their correct polarity based on the current set of
579 * CPU/MMU features.
580 */
581 jump_label_init();
Kevin Hao4db73272016-07-23 14:42:41 +0530582 cpu_feature_keys_init();
Kevin Haoc12e6f22016-07-23 14:42:42 +0530583 mmu_feature_keys_init();
Benjamin Herrenschmidt9402c682016-07-05 15:03:41 +1000584}
585
Michael Ellermana28e46f2016-07-26 22:29:18 +1000586static int __init check_features(void)
587{
588 WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
589 "CPU features changed after feature patching!\n");
590 WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
591 "MMU features changed after feature patching!\n");
592#ifdef CONFIG_PPC64
593 WARN(saved_firmware_features != powerpc_firmware_features,
594 "Firmware features changed after feature patching!\n");
595#endif
596
597 return 0;
598}
599late_initcall(check_features);
600
Michael Ellerman362e7702008-06-24 11:33:03 +1000601#ifdef CONFIG_FTR_FIXUP_SELFTEST
602
603#define check(x) \
604 if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);
605
606/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
607static struct fixup_entry fixup;
608
/* Self-relative offset from a fixup entry to an address (see calc_addr()). */
static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	unsigned long target = (unsigned long)p;
	unsigned long base = (unsigned long)entry;

	return target - base;
}
613
Anton Blancharde51df2c2014-08-20 08:55:18 +1000614static void test_basic_patching(void)
Michael Ellerman362e7702008-06-24 11:33:03 +1000615{
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700616 extern unsigned int ftr_fixup_test1[];
617 extern unsigned int end_ftr_fixup_test1[];
618 extern unsigned int ftr_fixup_test1_orig[];
619 extern unsigned int ftr_fixup_test1_expected[];
Michael Ellermancad0e392018-04-17 00:39:03 +1000620 int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1);
Michael Ellerman362e7702008-06-24 11:33:03 +1000621
622 fixup.value = fixup.mask = 8;
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700623 fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
624 fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
Michael Ellerman362e7702008-06-24 11:33:03 +1000625 fixup.alt_start_off = fixup.alt_end_off = 0;
626
627 /* Sanity check */
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700628 check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
Michael Ellerman362e7702008-06-24 11:33:03 +1000629
630 /* Check we don't patch if the value matches */
631 patch_feature_section(8, &fixup);
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700632 check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
Michael Ellerman362e7702008-06-24 11:33:03 +1000633
634 /* Check we do patch if the value doesn't match */
635 patch_feature_section(0, &fixup);
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700636 check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
Michael Ellerman362e7702008-06-24 11:33:03 +1000637
638 /* Check we do patch if the mask doesn't match */
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700639 memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
640 check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
Michael Ellerman362e7702008-06-24 11:33:03 +1000641 patch_feature_section(~8, &fixup);
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700642 check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
Michael Ellerman362e7702008-06-24 11:33:03 +1000643}
644
/*
 * Selftest: when the feature check fails, the section must be replaced
 * by its (equal-sized) alternative; when it passes, the original code
 * must remain.
 */
static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2[];
	extern unsigned int end_ftr_fixup_test2[];
	extern unsigned int ftr_fixup_test2_orig[];
	extern unsigned int ftr_fixup_test2_alt[];
	extern unsigned int ftr_fixup_test2_expected[];
	int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2);

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}
677
/*
 * Selftest: an alternative larger than the section it would replace
 * must be rejected (return 1) and leave the original code untouched,
 * regardless of the feature value.
 */
static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3[];
	extern unsigned int end_ftr_fixup_test3[];
	extern unsigned int ftr_fixup_test3_orig[];
	extern unsigned int ftr_fixup_test3_alt[];
	int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3);

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}
703
/*
 * Selftest: an alternative smaller than the section it replaces must
 * be copied in and the remainder nop-padded.  Also exercises a feature
 * flag in the high bit of unsigned long.
 */
static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4[];
	extern unsigned int end_ftr_fixup_test4[];
	extern unsigned int ftr_fixup_test4_orig[];
	extern unsigned int ftr_fixup_test4_alt[];
	extern unsigned int ftr_fixup_test4_expected[];
	int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4);
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}
739
static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5[];
	extern unsigned int end_ftr_fixup_test5[];
	extern unsigned int ftr_fixup_test5_expected[];
	int bytes = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5);

	/* This fixup was already applied during boot; just compare images */
	check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, bytes) == 0);
}
749
static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6[];
	extern unsigned int end_ftr_fixup_test6[];
	extern unsigned int ftr_fixup_test6_expected[];
	int bytes = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6);

	/* This fixup was already applied during boot; just compare images */
	check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, bytes) == 0);
}
759
static void test_alternative_case_with_branch_to_end(void)
{
	extern unsigned int ftr_fixup_test7[];
	extern unsigned int end_ftr_fixup_test7[];
	extern unsigned int ftr_fixup_test7_expected[];
	int bytes = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7);

	/* This fixup was already applied during boot; just compare images */
	check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, bytes) == 0);
}
769
Michael Ellerman362e7702008-06-24 11:33:03 +1000770static void test_cpu_macros(void)
771{
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700772 extern u8 ftr_fixup_test_FTR_macros[];
773 extern u8 ftr_fixup_test_FTR_macros_expected[];
774 unsigned long size = ftr_fixup_test_FTR_macros_expected -
775 ftr_fixup_test_FTR_macros;
Michael Ellerman362e7702008-06-24 11:33:03 +1000776
777 /* The fixups have already been done for us during boot */
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700778 check(memcmp(ftr_fixup_test_FTR_macros,
779 ftr_fixup_test_FTR_macros_expected, size) == 0);
Michael Ellerman362e7702008-06-24 11:33:03 +1000780}
781
static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros[];
	extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
	unsigned long len = ftr_fixup_test_FW_FTR_macros_expected -
			    ftr_fixup_test_FW_FTR_macros;

	/* Boot-time firmware-feature fixups have already run; compare images */
	check(memcmp(ftr_fixup_test_FW_FTR_macros,
		     ftr_fixup_test_FW_FTR_macros_expected, len) == 0);
#endif
}
795
Kumar Gala2d1b2022008-07-02 01:16:40 +1000796static void test_lwsync_macros(void)
797{
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700798 extern u8 lwsync_fixup_test[];
799 extern u8 end_lwsync_fixup_test[];
800 extern u8 lwsync_fixup_test_expected_LWSYNC[];
801 extern u8 lwsync_fixup_test_expected_SYNC[];
802 unsigned long size = end_lwsync_fixup_test -
803 lwsync_fixup_test;
Kumar Gala2d1b2022008-07-02 01:16:40 +1000804
805 /* The fixups have already been done for us during boot */
806 if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700807 check(memcmp(lwsync_fixup_test,
808 lwsync_fixup_test_expected_LWSYNC, size) == 0);
Kumar Gala2d1b2022008-07-02 01:16:40 +1000809 } else {
Daniel Axtensc69a48c2017-07-12 14:36:07 -0700810 check(memcmp(lwsync_fixup_test,
811 lwsync_fixup_test_expected_SYNC, size) == 0);
Kumar Gala2d1b2022008-07-02 01:16:40 +1000812 }
813}
814
Jordan Niethe785b79d2020-05-06 13:40:45 +1000815#ifdef CONFIG_PPC64
816static void __init test_prefix_patching(void)
817{
818 extern unsigned int ftr_fixup_prefix1[];
819 extern unsigned int end_ftr_fixup_prefix1[];
820 extern unsigned int ftr_fixup_prefix1_orig[];
821 extern unsigned int ftr_fixup_prefix1_expected[];
822 int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1);
823
824 fixup.value = fixup.mask = 8;
825 fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1);
826 fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3);
827 fixup.alt_start_off = fixup.alt_end_off = 0;
828
829 /* Sanity check */
830 check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0);
831
832 patch_feature_section(0, &fixup);
833 check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0);
834 check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0);
835}
836
837static void __init test_prefix_alt_patching(void)
838{
839 extern unsigned int ftr_fixup_prefix2[];
840 extern unsigned int end_ftr_fixup_prefix2[];
841 extern unsigned int ftr_fixup_prefix2_orig[];
842 extern unsigned int ftr_fixup_prefix2_expected[];
843 extern unsigned int ftr_fixup_prefix2_alt[];
844 int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2);
845
846 fixup.value = fixup.mask = 8;
847 fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1);
848 fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3);
849 fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt);
850 fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2);
851 /* Sanity check */
852 check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) == 0);
853
854 patch_feature_section(0, &fixup);
855 check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0);
856 check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0);
857}
858
859static void __init test_prefix_word_alt_patching(void)
860{
861 extern unsigned int ftr_fixup_prefix3[];
862 extern unsigned int end_ftr_fixup_prefix3[];
863 extern unsigned int ftr_fixup_prefix3_orig[];
864 extern unsigned int ftr_fixup_prefix3_expected[];
865 extern unsigned int ftr_fixup_prefix3_alt[];
866 int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3);
867
868 fixup.value = fixup.mask = 8;
869 fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1);
870 fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4);
871 fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt);
872 fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3);
873 /* Sanity check */
874 check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0);
875
876 patch_feature_section(0, &fixup);
877 check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0);
878 patch_feature_section(0, &fixup);
879 check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0);
880}
881#else
/* No-op stubs used when CONFIG_PPC64 is not set (see #ifdef block above) */
static inline void test_prefix_patching(void) {}
static inline void test_prefix_alt_patching(void) {}
static inline void test_prefix_word_alt_patching(void) {}
885#endif /* CONFIG_PPC64 */
886
/*
 * Entry point for the feature-fixup self-tests, registered as a late
 * initcall below. Runs each helper in sequence; the helpers validate
 * their results through the check() calls they contain. Always returns 0.
 */
static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	/* Basic and alternative-section patching */
	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_alternative_case_with_branch_to_end();
	/* Fixups applied by the boot-time feature macros */
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();
	/* Prefixed-instruction fixups (stubs when CONFIG_PPC64 is not set) */
	test_prefix_patching();
	test_prefix_alt_patching();
	test_prefix_word_alt_patching();

	return 0;
}
late_initcall(test_feature_fixups);
908
909#endif /* CONFIG_FTR_FIXUP_SELFTEST */