blob: 82cd07592519dca244fe14f02cbe553506264e2c [file] [log] [blame]
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
13
14#include <linux/efi.h>
Ard Biesheuvele5bc22a2015-11-30 13:28:18 +010015#include <linux/init.h>
Mark Salterf84d0272014-04-15 21:59:30 -040016
Mark Salterf84d0272014-04-15 21:59:30 -040017#include <asm/efi.h>
Yi Lid1ae8c02014-10-04 23:46:43 +080018
Ard Biesheuvel1fd55a92016-04-25 21:06:43 +010019/*
20 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
21 * executable, everything else can be mapped with the XN bits
22 * set. Also take the new (optional) RO/XP bits into account.
23 */
24static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
25{
26 u64 attr = md->attribute;
27 u32 type = md->type;
28
29 if (type == EFI_MEMORY_MAPPED_IO)
30 return PROT_DEVICE_nGnRE;
31
32 if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
33 "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
34 /*
35 * If the region is not aligned to the page size of the OS, we
36 * can not use strict permissions, since that would also affect
37 * the mapping attributes of the adjacent regions.
38 */
39 return pgprot_val(PAGE_KERNEL_EXEC);
40
41 /* R-- */
42 if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
43 (EFI_MEMORY_XP | EFI_MEMORY_RO))
44 return pgprot_val(PAGE_KERNEL_RO);
45
46 /* R-X */
47 if (attr & EFI_MEMORY_RO)
48 return pgprot_val(PAGE_KERNEL_ROX);
49
50 /* RW- */
51 if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
52 return pgprot_val(PAGE_KERNEL);
53
54 /* RWX */
55 return pgprot_val(PAGE_KERNEL_EXEC);
56}
57
/*
 * Filled in by the EFI stub before the kernel's .bss is cleared, so it
 * must live in .data rather than .bss to survive that clearing.
 */
struct screen_info screen_info __section(.data);
60
Ard Biesheuvelf7d92482015-11-30 13:28:19 +010061int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
62{
Ard Biesheuvel1fd55a92016-04-25 21:06:43 +010063 pteval_t prot_val = create_mapping_protection(md);
Ard Biesheuvelf14c66c2016-10-21 12:22:57 +010064 bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
65 md->type == EFI_RUNTIME_SERVICES_DATA);
Ard Biesheuvelf7d92482015-11-30 13:28:19 +010066
Ard Biesheuvel74c102c2016-06-29 14:51:28 +020067 if (!PAGE_ALIGNED(md->phys_addr) ||
68 !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
69 /*
70 * If the end address of this region is not aligned to page
71 * size, the mapping is rounded up, and may end up sharing a
72 * page frame with the next UEFI memory region. If we create
73 * a block entry now, we may need to split it again when mapping
74 * the next region, and support for that is going to be removed
75 * from the MMU routines. So avoid block mappings altogether in
76 * that case.
77 */
Ard Biesheuvelf14c66c2016-10-21 12:22:57 +010078 page_mappings_only = true;
Ard Biesheuvel74c102c2016-06-29 14:51:28 +020079 }
80
Ard Biesheuvelf7d92482015-11-30 13:28:19 +010081 create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
82 md->num_pages << EFI_PAGE_SHIFT,
Ard Biesheuvelf14c66c2016-10-21 12:22:57 +010083 __pgprot(prot_val | PTE_NG), page_mappings_only);
Ard Biesheuvelf7d92482015-11-30 13:28:19 +010084 return 0;
85}
86
Ard Biesheuvelbd264d02016-06-29 14:51:27 +020087static int __init set_permissions(pte_t *ptep, pgtable_t token,
88 unsigned long addr, void *data)
89{
90 efi_memory_desc_t *md = data;
91 pte_t pte = *ptep;
92
93 if (md->attribute & EFI_MEMORY_RO)
94 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
95 if (md->attribute & EFI_MEMORY_XP)
96 pte = set_pte_bit(pte, __pgprot(PTE_PXN));
97 set_pte(ptep, pte);
98 return 0;
99}
100
101int __init efi_set_mapping_permissions(struct mm_struct *mm,
102 efi_memory_desc_t *md)
103{
104 BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
105 md->type != EFI_RUNTIME_SERVICES_DATA);
106
107 /*
108 * Calling apply_to_page_range() is only safe on regions that are
109 * guaranteed to be mapped down to pages. Since we are only called
110 * for regions that have been mapped using efi_create_mapping() above
111 * (and this is checked by the generic Memory Attributes table parsing
112 * routines), there is no need to check that again here.
113 */
114 return apply_to_page_range(mm, md->virt_addr,
115 md->num_pages << EFI_PAGE_SHIFT,
116 set_permissions, md);
117}
118
/*
 * UpdateCapsule() depends on the system being shutdown via
 * ResetSystem(), so report that EFI must handle power-off whenever the
 * runtime services are available.
 */
bool efi_poweroff_required(void)
{
	return efi_enabled(EFI_RUNTIME_SERVICES);
}