// SPDX-License-Identifier: GPL-2.0-only
/*
 * PPC64 code to handle Linux booting another kernel.
 *
 * Copyright (C) 2004-2005, IBM Corp.
 *
 * Created by: Milton D Miller II
 */

#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/hardirq.h>

#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/asm-prototypes.h>

int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	const unsigned long *basep;
	const unsigned int *sizep;

	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle the virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;

	/* We also should not overwrite the tce tables */
	for_each_node_by_type(node, "pci") {
		basep = of_get_property(node, "linux,tce-base", NULL);
		sizep = of_get_property(node, "linux,tce-size", NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	return 0;
}

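/*
 * The kimage head is a chain of indirection pages: each entry is a
 * physical address tagged in its low bits with IND_DESTINATION (set
 * the copy destination), IND_INDIRECTION (chain to the next list
 * page), IND_SOURCE (copy one page) or IND_DONE (end of the list),
 * as defined in <linux/kexec.h>.
 */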
static void copy_segments(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr;
	void *dest;
	void *addr;

	/*
	 * We rely on kexec_load to create a list that properly
	 * initializes these pointers before they are used.
	 * We will still crash if the list is wrong, but at least
	 * the compiler will be quiet.
	 */
	ptr = NULL;
	dest = NULL;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		addr = __va(entry & PAGE_MASK);

		switch (entry & IND_FLAGS) {
		case IND_DESTINATION:
			dest = addr;
			break;
		case IND_INDIRECTION:
			ptr = addr;
			break;
		case IND_SOURCE:
			copy_page(dest, addr);
			dest += PAGE_SIZE;
		}
	}
}

void kexec_copy_flush(struct kimage *image)
{
	long i, nr_segments = image->nr_segments;
	struct kexec_segment ranges[KEXEC_SEGMENT_MAX];

	/* save the ranges on the stack to efficiently flush the icache */
	memcpy(ranges, image->segment, sizeof(ranges));

	/*
	 * After this call we may not use anything allocated in dynamic
	 * memory, including *image.
	 *
	 * Only globals and the stack are allowed.
	 */
	copy_segments(image->head);

	/*
	 * we need to clear the icache for all dest pages sometime,
	 * including ones that were in place on the original copy
	 */
	for (i = 0; i < nr_segments; i++)
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}

#ifdef CONFIG_SMP

static int kexec_all_irq_disabled = 0;

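/*
 * IPI handler run on every other online CPU: park the CPU with IRQs
 * hard-disabled, report KEXEC_STATE_IRQS_OFF via the PACA, wait for
 * the kexecing CPU's go-ahead, then spin in kexec_smp_wait().
 */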
static void kexec_smp_down(void *arg)
{
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure our irqs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
	while (kexec_all_irq_disabled == 0)
		cpu_relax();
	mb(); /* make sure all irqs are disabled before this */
	hw_breakpoint_disable();
	/*
	 * Now every CPU has IRQs off, we can clear out any pending
	 * IPIs and be sure that no more will come in after this.
	 */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	kexec_smp_wait();
	/* NOTREACHED */
}

static void kexec_prepare_cpus_wait(int wait_state)
{
	int my_cpu, i, notified = -1;

	hw_breakpoint_disable();
	my_cpu = get_cpu();
	/* Make sure each CPU has at least made it to the state we need.
	 *
	 * FIXME: There is a (slim) chance of a problem if not all of the CPUs
	 * are correctly onlined. If somehow we start a CPU on boot with RTAS
	 * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in
	 * time, the boot CPU will time out. If it does eventually execute
	 * stuff, the secondary will start up (paca_ptrs[]->cpu_start was
	 * written) and get into a peculiar state.
	 * If the platform supports smp_ops->take_timebase(), the secondary CPU
	 * will probably be spinning in there. If not (i.e. pseries), the
	 * secondary will continue on and try to online itself/idle/etc. If it
	 * survives that, we need to find these
	 * possible-but-not-online-but-should-be CPUs and chaperone them into
	 * kexec_smp_wait().
	 */
	for_each_online_cpu(i) {
		if (i == my_cpu)
			continue;

		while (paca_ptrs[i]->kexec_state < wait_state) {
			barrier();
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d (physical %d) to enter %i state\n",
				       i, paca_ptrs[i]->hw_cpu_id, wait_state);
				notified = i;
			}
		}
	}
	mb();
}

/*
 * We need to make sure each present CPU is online. The next kernel will scan
 * the device tree and assume primary threads are online and query secondary
 * threads via RTAS to online them if required. If we don't online primary
 * threads, they will be stuck. However, we also online secondary threads as we
 * may be using 'cede offline'. In this case RTAS doesn't see the secondary
 * threads as offline -- and again, these CPUs will be stuck.
 *
 * So, we online all CPUs that should be running, including secondary threads.
 */
static void wake_offline_cpus(void)
{
	int cpu = 0;

	for_each_present_cpu(cpu) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
			       cpu);
			WARN_ON(cpu_up(cpu));
		}
	}
}

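/*
 * Bring every online CPU down into the kexec wait loop: IPI them into
 * kexec_smp_down(), wait until all have reported IRQS_OFF, release
 * them via kexec_all_irq_disabled, then wait again until all have
 * entered real mode.
 */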
static void kexec_prepare_cpus(void)
{
	wake_offline_cpus();
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/*
	 * Before removing MMU mappings make sure all CPUs have entered real
	 * mode:
	 */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	put_cpu();
}

#else /* ! SMP */

static void kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 *
	 * We need to release the cpus if we are ever going from a
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
	hard_irq_disable();
}

#endif /* SMP */

/*
 * kexec thread structure and stack.
 *
 * We need to make sure that this is 16384-byte aligned due to the
 * way process stacks are handled. It also must be statically allocated
 * or allocated as part of the kimage, because everything else may be
 * overwritten when we copy the kexec image. We piggyback on the
 * "init_task" linker section here to statically allocate a stack.
 *
 * We could use a smaller stack if we don't care about anything using
 * current, but that audit has not been performed.
 */
static union thread_union kexec_stack __init_task_data =
	{ };

/*
 * For similar reasons to the stack above, the kexecing CPU needs to be on a
 * static PACA; we switch to kexec_paca.
 */
struct paca_struct kexec_paca;

/* Our assembly helper, in misc_64.S */
extern void kexec_sequence(void *newstack, unsigned long start,
			   void *image, void *control,
			   void (*clear_all)(void),
			   bool copy_with_mmu_off) __noreturn;
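/*
 * Note: kexec_sequence() does not return. It switches to the static
 * stack above, optionally turns the MMU off, copies the image via
 * kexec_copy_flush() and finally jumps to the new kernel at 'start'.
 */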

/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	bool copy_with_mmu_off;

	/* prepare control code if any */

	/*
	 * If the kexec boot is the normal one, we need to shut down the
	 * other CPUs into our wait loop and quiesce interrupts.
	 * Otherwise, in the case of crashed mode (crashing_cpu >= 0),
	 * stopping the other CPUs and collecting their pt_regs is done
	 * earlier, via the debugger IPI.
	 */

	if (!kdump_in_progress())
		kexec_prepare_cpus();

	printk("kexec: Starting switchover sequence.\n");

	/* switch to a statically allocated stack. Based on irq stack code.
	 * We set up preempt_count to avoid using VMX in memcpy.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	current_thread_info()->flags = 0;
	current_thread_info()->preempt_count = HARDIRQ_OFFSET;

	/* We need a static PACA, too; copy this CPU's PACA over and switch to
	 * it. Also poison per_cpu_offset and NULL lppaca to catch anyone using
	 * non-static data.
	 */
	memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
	kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
#ifdef CONFIG_PPC_PSERIES
	kexec_paca.lppaca_ptr = NULL;
#endif
	paca_ptrs[kexec_paca.paca_index] = &kexec_paca;

	setup_paca(&kexec_paca);

	/*
	 * The lppaca should be unregistered at this point so the HV won't
	 * touch it. In the case of a crash, none of the lppacas are
	 * unregistered so there is not much we can do about it here.
	 */

	/*
	 * On Book3S, the copy must happen with the MMU off if we are either
	 * using Radix page tables or we are not in an LPAR since we can
	 * overwrite the page tables while copying.
	 *
	 * In an LPAR, we keep the MMU on otherwise we can't access beyond
	 * the RMA. On BookE there is no real MMU off mode, so we have to
	 * keep it enabled as well (but then we have bolted TLB entries).
	 */
#ifdef CONFIG_PPC_BOOK3E
	copy_with_mmu_off = false;
#else
	copy_with_mmu_off = radix_enabled() ||
		!(firmware_has_feature(FW_FEATURE_LPAR) ||
		  firmware_has_feature(FW_FEATURE_PS3_LV1));
#endif

	/* Some things are best done in assembly. Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
		       page_address(image->control_code_page),
		       mmu_cleanup_all, copy_with_mmu_off);
	/* NOTREACHED */
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
static unsigned long htab_size;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = &htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = &htab_size,
};

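/*
 * Publish the hash page table's physical base and size under /chosen
 * so the kexec tooling and the next kernel can locate the first
 * kernel's htab (e.g. for kdump).
 */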
static int __init export_htab_values(void)
{
	struct device_node *node;

	/* On machines with no htab, htab_address is NULL */
	if (!htab_address)
		return -ENODEV;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENODEV;

	/* remove any stale properties so ours can be found */
	of_remove_property(node, of_find_property(node, htab_base_prop.name, NULL));
	of_remove_property(node, of_find_property(node, htab_size_prop.name, NULL));

	htab_base = cpu_to_be64(__pa(htab_address));
	of_add_property(node, &htab_base_prop);
	htab_size = cpu_to_be64(htab_size_bytes);
	of_add_property(node, &htab_size_prop);

	of_node_put(node);
	return 0;
}
late_initcall(export_htab_values);
#endif /* CONFIG_PPC_BOOK3S_64 */