blob: ec6fefbfd3c0454b009a2a7a04a76edc44b8c5e8 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Eric W. Biederman700efc12008-02-23 09:58:20 +01002/*
3 * linux/arch/i386/kernel/head32.c -- prepare to run common code
4 *
5 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6 * Copyright (C) 2007 Eric Biederman <ebiederm@xmission.com>
7 */
8
9#include <linux/init.h>
10#include <linux/start_kernel.h>
Yinghai Luc967da62010-03-28 19:42:55 -070011#include <linux/mm.h>
Yinghai Lu72d7c3b2010-08-25 13:39:17 -070012#include <linux/memblock.h>
Eric W. Biederman700efc12008-02-23 09:58:20 +010013
Thomas Gleixner87e81782017-08-28 08:47:48 +020014#include <asm/desc.h>
Yinghai Lua4c81cf2008-05-18 01:18:57 -070015#include <asm/setup.h>
16#include <asm/sections.h>
Ingo Molnar66441bd2017-01-27 10:27:10 +010017#include <asm/e820/api.h>
Thomas Gleixner816c25e2009-08-19 14:36:27 +020018#include <asm/page.h>
Thomas Gleixnerde934102009-08-20 09:27:29 +020019#include <asm/apic.h>
20#include <asm/io_apic.h>
Thomas Gleixner47a3d5d2009-08-29 15:03:59 +020021#include <asm/bios_ebda.h>
Borislav Petkovb40827fa2010-08-28 15:58:33 +020022#include <asm/tlbflush.h>
H. Peter Anvin5dcd14e2013-01-29 01:05:24 -080023#include <asm/bootparam_utils.h>
Thomas Gleixner47a3d5d2009-08-29 15:03:59 +020024
25static void __init i386_default_early_setup(void)
26{
Uwe Kleine-König421f91d2010-06-11 12:17:00 +020027 /* Initialize 32bit specific setup functions */
Thomas Gleixner47a3d5d2009-08-29 15:03:59 +020028 x86_init.resources.reserve_resources = i386_reserve_resources;
29 x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
Thomas Gleixner47a3d5d2009-08-29 15:03:59 +020030}
Yinghai Lua4c81cf2008-05-18 01:18:57 -070031
Andi Kleen2605fc22014-05-02 00:44:37 +020032asmlinkage __visible void __init i386_start_kernel(void)
Eric W. Biederman700efc12008-02-23 09:58:20 +010033{
Thomas Gleixner9c48c092017-10-16 12:12:16 +020034 /* Make sure IDT is set up before any exception happens */
Thomas Gleixner87e81782017-08-28 08:47:48 +020035 idt_setup_early_handler();
36
Thomas Gleixner9c48c092017-10-16 12:12:16 +020037 cr4_init_shadow();
38
H. Peter Anvin5dcd14e2013-01-29 01:05:24 -080039 sanitize_boot_params(&boot_params);
40
Luis R. Rodriguez8d152e72016-04-13 17:04:34 -070041 x86_early_init_platform_quirks();
42
Thomas Gleixner47a3d5d2009-08-29 15:03:59 +020043 /* Call the subarch specific early setup function */
44 switch (boot_params.hdr.hardware_subarch) {
Kuppuswamy Sathyanarayanan712b6aa2013-10-17 15:35:29 -070045 case X86_SUBARCH_INTEL_MID:
46 x86_intel_mid_early_setup();
Thomas Gleixner3f4110a2009-08-29 14:54:20 +020047 break;
Thomas Gleixnerc751e172010-11-09 12:08:04 -080048 case X86_SUBARCH_CE4100:
49 x86_ce4100_early_setup();
50 break;
Thomas Gleixner47a3d5d2009-08-29 15:03:59 +020051 default:
52 i386_default_early_setup();
53 break;
54 }
Yinghai Lua4c81cf2008-05-18 01:18:57 -070055
Eric W. Biederman700efc12008-02-23 09:58:20 +010056 start_kernel();
57}
Boris Ostrovsky1e620f92016-12-08 11:44:31 -050058
59/*
60 * Initialize page tables. This creates a PDE and a set of page
61 * tables, which are located immediately beyond __brk_base. The variable
62 * _brk_end is set up to point to the first "safe" location.
63 * Mappings are created both at virtual address 0 (identity mapping)
64 * and PAGE_OFFSET for up to _end.
65 *
66 * In PAE mode initial_page_table is statically defined to contain
67 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
68 * entries). The identity mapping is handled by pointing two PGD entries
69 * to the first kernel PMD. Note the upper half of each PMD or PTE are
70 * always zero at this stage.
71 */
void __init mk_early_pgtbl_32(void)
{
	/*
	 * This runs before paging is enabled, so every pointer we write
	 * through must be a physical address.  Force __pa() to the plain
	 * virtual-minus-PAGE_OFFSET arithmetic; the generic definition may
	 * involve an out-of-line call (see the pte_pfn() note below) which
	 * is not usable this early.  NOTE(review): presumably this also
	 * sidesteps paravirt indirection — confirm against <asm/page.h>.
	 */
#ifdef __pa
#undef __pa
#endif
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
	pte_t pte, *ptep;
	int i;
	unsigned long *ptr;
	/* Enough space to fit pagetables for the low memory linear map */
	const unsigned long limit = __pa(_end) +
		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
	/*
	 * Level-2 entries are PMDs under PAE (initial_pg_pmd) and PGDs
	 * otherwise (initial_page_table); SET_PL2 hides the field name so
	 * the loop below is identical for both configurations.
	 */
#ifdef CONFIG_X86_PAE
	pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
#define SET_PL2(pl2, val)	{ (pl2).pmd = (val); }
#else
	pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
#define SET_PL2(pl2, val)	{ (pl2).pgd = (val); }
#endif

	/* Page tables are carved out of the brk area (see header comment). */
	ptep = (pte_t *)__pa(__brk_base);
	/* Identity mapping: start at pfn 0 with the identity attributes. */
	pte.pte = PTE_IDENT_ATTR;

	/* Fill one page table per level-2 entry until 'limit' is covered. */
	while ((pte.pte & PTE_PFN_MASK) < limit) {

		SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
		*pl2p = pl2;
#ifndef CONFIG_X86_PAE
		/* Kernel PDE entry */
		*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
#endif
		/* Populate the page table: consecutive pfns, one per PTE. */
		for (i = 0; i < PTRS_PER_PTE; i++) {
			*ptep = pte;
			pte.pte += PAGE_SIZE;
			ptep++;
		}

		pl2p++;
	}

	/*
	 * Publish results to the (virtually-addressed) globals via their
	 * physical addresses, since we can't dereference virtual ones yet.
	 */
	ptr = (unsigned long *)__pa(&max_pfn_mapped);
	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
	*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;

	/* First safe brk location is just past the tables we allocated. */
	ptr = (unsigned long *)__pa(&_brk_end);
	*ptr = (unsigned long)ptep + PAGE_OFFSET;
}
119