/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Support for Kernel relocation at boot time
 *
 * Copyright (C) 2015, Imagination Technologies Ltd.
 * Authors: Matt Redfearn (matt.redfearn@imgtec.com)
 */
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/fw/fw.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/start_kernel.h>
#include <linux/string.h>

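/*
 * RELOCATED() relies on a variable named "offset" being in scope wherever
 * it is used: the signed byte distance between the kernel's link-time and
 * run-time addresses.
 */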
#define RELOCATED(x) ((void *)((long)(x) + offset))

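/*
 * The relocation table is an array of u32 entries laid down at build time
 * by the relocs tool. Each entry packs the relocation type into the top
 * byte and a word index from _text into the low 24 bits; a zero entry
 * terminates the table. See do_relocations() below for the decode.
 */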
extern u32 _relocation_start[];	/* End kernel image / start relocation table */
extern u32 _relocation_end[];	/* End relocation table */

extern long __start___ex_table;	/* Start exception table */
extern long __stop___ex_table;	/* End exception table */

/*
 * This function may be defined for a platform to perform any post-relocation
 * fixup necessary.
 * Return non-zero to abort relocation.
 */
int __weak plat_post_relocation(long offset)
{
	return 0;
}

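/*
 * RDHWR register $1 is SYNCI_Step: the address stride that the SYNCI
 * instruction must be stepped by in order to hit every line of the
 * instruction cache.
 */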
static inline u32 __init get_synci_step(void)
{
	u32 res;

	__asm__("rdhwr %0, $1" : "=r" (res));

	return res;
}

static void __init sync_icache(void *kbase, unsigned long kernel_length)
{
	void *kend = kbase + kernel_length;
	u32 step = get_synci_step();

	/*
	 * A SYNCI_Step of zero indicates that no instruction cache
	 * synchronisation is required on this CPU; it would also make the
	 * loop below step forever, so bail out early.
	 */
	if (!step)
		return;

	do {
		__asm__ __volatile__(
			"synci 0(%0)"
			: /* no output */
			: "r" (kbase));

		kbase += step;
	} while (kbase < kend);

	/* Completion barrier */
	__sync();
}

static int __init apply_r_mips_64_rel(u32 *loc_orig, u32 *loc_new, long offset)
{
	*(u64 *)loc_new += offset;

	return 0;
}

static int __init apply_r_mips_32_rel(u32 *loc_orig, u32 *loc_new, long offset)
{
	*loc_new += offset;

	return 0;
}

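/*
 * R_MIPS_26 relocations patch j/jal instructions, whose 26-bit target
 * field is a word index within the current 256MB (28-bit) segment. The
 * relocated target must therefore land in the same 256MB segment as the
 * relocated instruction.
 */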
static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
{
	unsigned long target_addr = (*loc_orig) & 0x03ffffff;

	if (offset % 4) {
		pr_err("Dangerous R_MIPS_26 REL relocation\n");
		return -ENOEXEC;
	}

	/* Original target address */
	target_addr <<= 2;
	target_addr += (unsigned long)loc_orig & ~0x03ffffff;

	/* Get the new target address */
	target_addr += offset;

	if ((target_addr & 0xf0000000) != ((unsigned long)loc_new & 0xf0000000)) {
		pr_err("R_MIPS_26 REL relocation overflow\n");
		return -ENOEXEC;
	}

	target_addr -= (unsigned long)loc_new & ~0x03ffffff;
	target_addr >>= 2;

	*loc_new = (*loc_new & ~0x03ffffff) | (target_addr & 0x03ffffff);

	return 0;
}

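/*
 * Relocation offsets are always a multiple of 64K (enforced by
 * relocation_addr_valid() below), so the low 16 bits of every relocated
 * address are unchanged. R_MIPS_HI16 entries can therefore be patched
 * without tracking their paired R_MIPS_LO16 relocations for carry.
 */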
static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset)
{
	unsigned long insn = *loc_orig;
	unsigned long target = (insn & 0xffff) << 16; /* high 16bits of target */

	target += offset;

	*loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff);
	return 0;
}

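/*
 * Handlers for the REL-style relocation types expected in the kernel
 * image; do_relocations() rejects any type without an entry here.
 */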
static int (*reloc_handlers_rel[]) (u32 *, u32 *, long) __initdata = {
	[R_MIPS_64]	= apply_r_mips_64_rel,
	[R_MIPS_32]	= apply_r_mips_32_rel,
	[R_MIPS_26]	= apply_r_mips_26_rel,
	[R_MIPS_HI16]	= apply_r_mips_hi16_rel,
};

int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
{
	u32 *r;
	u32 *loc_orig;
	u32 *loc_new;
	int type;
	int res;

	for (r = _relocation_start; r < _relocation_end; r++) {
		/* Sentinel for last relocation */
		if (*r == 0)
			break;

		/* Top byte is the type, low 24 bits a word index from _text */
		type = (*r >> 24) & 0xff;
		loc_orig = (void *)(kbase_old + ((*r & 0x00ffffff) << 2));
		loc_new = RELOCATED(loc_orig);

		if (type >= ARRAY_SIZE(reloc_handlers_rel) ||
		    reloc_handlers_rel[type] == NULL) {
			/* Unsupported relocation */
			pr_err("Unhandled relocation type %d at 0x%pK\n",
			       type, loc_orig);
			return -ENOEXEC;
		}

		res = reloc_handlers_rel[type](loc_orig, loc_new, offset);
		if (res)
			return res;
	}

	return 0;
}

/*
 * The exception table is filled in by the relocs tool after vmlinux is
 * linked. It must be relocated separately since the linker does not emit
 * any relocation information for it.
 */
static int __init relocate_exception_table(long offset)
{
	unsigned long *etable_start, *etable_end, *e;

	etable_start = RELOCATED(&__start___ex_table);
	etable_end = RELOCATED(&__stop___ex_table);

	for (e = etable_start; e < etable_end; e++)
		*e += offset;

	return 0;
}

#ifdef CONFIG_RANDOMIZE_BASE

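/*
 * Mix the bytes of "area" into "hash" with a rotate-and-XOR step per
 * word: a cheap mixing function for the small amount of boot-time
 * entropy available this early.
 */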
static inline __init unsigned long rotate_xor(unsigned long hash,
					      const void *area, size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

static inline __init unsigned long get_random_boot(void)
{
	unsigned long entropy = random_get_entropy();
	unsigned long hash = 0;

	/* Attempt to create a simple but unpredictable starting entropy. */
	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));

	/* Add in any runtime entropy we can get */
	hash = rotate_xor(hash, &entropy, sizeof(entropy));

#if defined(CONFIG_USE_OF)
	/* Get any additional entropy passed in device tree */
	if (initial_boot_params) {
		int node, len;
		u64 *prop;

		node = fdt_path_offset(initial_boot_params, "/chosen");
		if (node >= 0) {
			prop = fdt_getprop_w(initial_boot_params, node,
					     "kaslr-seed", &len);
			if (prop && (len == sizeof(u64)))
				hash = rotate_xor(hash, prop, sizeof(*prop));
		}
	}
#endif /* CONFIG_USE_OF */

	return hash;
}

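/*
 * "nokaslr" on either the built-in or the firmware command line disables
 * randomisation. A match only counts if it starts the line or follows a
 * space, so that an option merely ending in "nokaslr" does not match.
 */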
static inline __init bool kaslr_disabled(void)
{
	char *str;

#if defined(CONFIG_CMDLINE_BOOL)
	const char *builtin_cmdline = CONFIG_CMDLINE;

	str = strstr(builtin_cmdline, "nokaslr");
	if (str == builtin_cmdline ||
	    (str > builtin_cmdline && *(str - 1) == ' '))
		return true;
#endif
	str = strstr(arcs_cmdline, "nokaslr");
	if (str == arcs_cmdline || (str > arcs_cmdline && *(str - 1) == ' '))
		return true;

	return false;
}

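/*
 * The random offset is a whole multiple of 64K (the hash is shifted left
 * by 16 bits), reduced modulo CONFIG_RANDOMIZE_BASE_MAX_OFFSET (a power
 * of two), and pushed past the end of the image if it would otherwise
 * overlap the running kernel.
 */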
static inline void __init *determine_relocation_address(void)
{
	/* Choose a new address for the kernel */
	unsigned long kernel_length;
	void *dest = &_text;
	unsigned long offset;

	if (kaslr_disabled())
		return dest;

	kernel_length = (long)_end - (long)(&_text);

	offset = get_random_boot() << 16;
	offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
	if (offset < kernel_length)
		/* Step past the image, keeping the offset 64K aligned */
		offset += ALIGN(kernel_length, 0x10000);

	return RELOCATED(dest);
}

#else

static inline void __init *determine_relocation_address(void)
{
	/*
	 * Choose a new address for the kernel.
	 * For now we'll hard code the destination.
	 */
	return (void *)0xffffffff81000000;
}

#endif

static inline int __init relocation_addr_valid(void *loc_new)
{
	if ((unsigned long)loc_new & 0x0000ffff) {
		/* Inappropriately aligned new location */
		return 0;
	}
	if ((unsigned long)loc_new < (unsigned long)&_end) {
		/* New location overlaps original kernel */
		return 0;
	}
	return 1;
}

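/*
 * Called early in boot, before start_kernel(). Picks a destination,
 * copies and relocates the image, then returns the entry point the boot
 * code should jump to: the relocated start_kernel() on success, or the
 * original one if relocation was not performed.
 */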
void *__init relocate_kernel(void)
{
	void *loc_new;
	unsigned long kernel_length;
	unsigned long bss_length;
	long offset = 0;
	int res = 1;
	/* Default to original kernel entry point */
	void *kernel_entry = start_kernel;

	/* Get the command line */
	fw_init_cmdline();
#if defined(CONFIG_USE_OF)
	/* Deal with the device tree */
	early_init_dt_scan(plat_get_fdt());
	if (boot_command_line[0]) {
		/* Boot command line was passed in device tree */
		strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
	}
#endif /* CONFIG_USE_OF */

	kernel_length = (long)(&_relocation_start) - (long)(&_text);
	bss_length = (long)&__bss_stop - (long)&__bss_start;

	loc_new = determine_relocation_address();

	/* Sanity check relocation address */
	if (relocation_addr_valid(loc_new))
		offset = (unsigned long)loc_new - (unsigned long)(&_text);

	/* Reset the command line now so we don't end up with a duplicate */
	arcs_cmdline[0] = '\0';

	if (offset) {
		/* Copy the kernel to its new location */
		memcpy(loc_new, &_text, kernel_length);

		/* Perform relocations on the new kernel */
		res = do_relocations(&_text, loc_new, offset);
		if (res < 0)
			goto out;

		/* Sync the caches ready for execution of new kernel */
		sync_icache(loc_new, kernel_length);

		res = relocate_exception_table(offset);
		if (res < 0)
			goto out;

		/*
		 * The original .bss has already been cleared, and some
		 * variables such as command line parameters have been
		 * stored to it, so make a copy in the new location.
		 */
		memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length);

		/*
		 * Last chance for the platform to abort relocation.
		 * This may also be used by the platform to perform any
		 * initialisation required now that the new kernel is
		 * resident in memory and ready to be executed.
		 */
		if (plat_post_relocation(offset))
			goto out;

		/* The current thread is now within the relocated image */
		__current_thread_info = RELOCATED(&init_thread_union);

		/* Return the new kernel's entry point */
		kernel_entry = RELOCATED(start_kernel);
	}
out:
	return kernel_entry;
}

/*
 * Show relocation information on panic.
 */
void show_kernel_relocation(const char *level)
{
	unsigned long offset;

	offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);

	if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
		printk(level);
		pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
		pr_cont(" .text @ 0x%pK\n", _text);
		pr_cont(" .data @ 0x%pK\n", _sdata);
		pr_cont(" .bss  @ 0x%pK\n", __bss_start);
	}
}

static int kernel_location_notifier_fn(struct notifier_block *self,
				       unsigned long v, void *p)
{
	show_kernel_relocation(KERN_EMERG);
	return NOTIFY_DONE;
}

static struct notifier_block kernel_location_notifier = {
	.notifier_call = kernel_location_notifier_fn
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &kernel_location_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);