Mark Salter | f84d027 | 2014-04-15 21:59:30 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Extensible Firmware Interface |
| 3 | * |
| 4 | * Based on Extensible Firmware Interface Specification version 2.4 |
| 5 | * |
| 6 | * Copyright (C) 2013, 2014 Linaro Ltd. |
| 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as |
| 10 | * published by the Free Software Foundation. |
| 11 | * |
| 12 | */ |
| 13 | |
| 14 | #include <linux/efi.h> |
Ard Biesheuvel | e5bc22a | 2015-11-30 13:28:18 +0100 | [diff] [blame] | 15 | #include <linux/init.h> |
Mark Salter | f84d027 | 2014-04-15 21:59:30 -0400 | [diff] [blame] | 16 | |
Mark Salter | f84d027 | 2014-04-15 21:59:30 -0400 | [diff] [blame] | 17 | #include <asm/efi.h> |
Yi Li | d1ae8c0 | 2014-10-04 23:46:43 +0800 | [diff] [blame] | 18 | |
Ard Biesheuvel | 1fd55a9 | 2016-04-25 21:06:43 +0100 | [diff] [blame] | 19 | /* |
| 20 | * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be |
| 21 | * executable, everything else can be mapped with the XN bits |
| 22 | * set. Also take the new (optional) RO/XP bits into account. |
| 23 | */ |
| 24 | static __init pteval_t create_mapping_protection(efi_memory_desc_t *md) |
| 25 | { |
| 26 | u64 attr = md->attribute; |
| 27 | u32 type = md->type; |
| 28 | |
| 29 | if (type == EFI_MEMORY_MAPPED_IO) |
| 30 | return PROT_DEVICE_nGnRE; |
| 31 | |
| 32 | if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr), |
| 33 | "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?")) |
| 34 | /* |
| 35 | * If the region is not aligned to the page size of the OS, we |
| 36 | * can not use strict permissions, since that would also affect |
| 37 | * the mapping attributes of the adjacent regions. |
| 38 | */ |
| 39 | return pgprot_val(PAGE_KERNEL_EXEC); |
| 40 | |
| 41 | /* R-- */ |
| 42 | if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) == |
| 43 | (EFI_MEMORY_XP | EFI_MEMORY_RO)) |
| 44 | return pgprot_val(PAGE_KERNEL_RO); |
| 45 | |
| 46 | /* R-X */ |
| 47 | if (attr & EFI_MEMORY_RO) |
| 48 | return pgprot_val(PAGE_KERNEL_ROX); |
| 49 | |
| 50 | /* RW- */ |
| 51 | if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE) |
| 52 | return pgprot_val(PAGE_KERNEL); |
| 53 | |
| 54 | /* RWX */ |
| 55 | return pgprot_val(PAGE_KERNEL_EXEC); |
| 56 | } |
| 57 | |
/*
 * Filled in by the EFI stub, which runs before the kernel proper clears
 * .bss — so this must live in .data to keep the stub's contents intact.
 */
struct screen_info screen_info __section(.data);
| 60 | |
Ard Biesheuvel | f7d9248 | 2015-11-30 13:28:19 +0100 | [diff] [blame] | 61 | int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) |
| 62 | { |
Ard Biesheuvel | 1fd55a9 | 2016-04-25 21:06:43 +0100 | [diff] [blame] | 63 | pteval_t prot_val = create_mapping_protection(md); |
Ard Biesheuvel | f14c66c | 2016-10-21 12:22:57 +0100 | [diff] [blame] | 64 | bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE || |
| 65 | md->type == EFI_RUNTIME_SERVICES_DATA); |
Ard Biesheuvel | f7d9248 | 2015-11-30 13:28:19 +0100 | [diff] [blame] | 66 | |
Ard Biesheuvel | 74c102c | 2016-06-29 14:51:28 +0200 | [diff] [blame] | 67 | if (!PAGE_ALIGNED(md->phys_addr) || |
| 68 | !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) { |
| 69 | /* |
| 70 | * If the end address of this region is not aligned to page |
| 71 | * size, the mapping is rounded up, and may end up sharing a |
| 72 | * page frame with the next UEFI memory region. If we create |
| 73 | * a block entry now, we may need to split it again when mapping |
| 74 | * the next region, and support for that is going to be removed |
| 75 | * from the MMU routines. So avoid block mappings altogether in |
| 76 | * that case. |
| 77 | */ |
Ard Biesheuvel | f14c66c | 2016-10-21 12:22:57 +0100 | [diff] [blame] | 78 | page_mappings_only = true; |
Ard Biesheuvel | 74c102c | 2016-06-29 14:51:28 +0200 | [diff] [blame] | 79 | } |
| 80 | |
Ard Biesheuvel | f7d9248 | 2015-11-30 13:28:19 +0100 | [diff] [blame] | 81 | create_pgd_mapping(mm, md->phys_addr, md->virt_addr, |
| 82 | md->num_pages << EFI_PAGE_SHIFT, |
Ard Biesheuvel | f14c66c | 2016-10-21 12:22:57 +0100 | [diff] [blame] | 83 | __pgprot(prot_val | PTE_NG), page_mappings_only); |
Ard Biesheuvel | f7d9248 | 2015-11-30 13:28:19 +0100 | [diff] [blame] | 84 | return 0; |
| 85 | } |
| 86 | |
Ard Biesheuvel | bd264d0 | 2016-06-29 14:51:27 +0200 | [diff] [blame] | 87 | static int __init set_permissions(pte_t *ptep, pgtable_t token, |
| 88 | unsigned long addr, void *data) |
| 89 | { |
| 90 | efi_memory_desc_t *md = data; |
| 91 | pte_t pte = *ptep; |
| 92 | |
| 93 | if (md->attribute & EFI_MEMORY_RO) |
| 94 | pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); |
| 95 | if (md->attribute & EFI_MEMORY_XP) |
| 96 | pte = set_pte_bit(pte, __pgprot(PTE_PXN)); |
| 97 | set_pte(ptep, pte); |
| 98 | return 0; |
| 99 | } |
| 100 | |
| 101 | int __init efi_set_mapping_permissions(struct mm_struct *mm, |
| 102 | efi_memory_desc_t *md) |
| 103 | { |
| 104 | BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE && |
| 105 | md->type != EFI_RUNTIME_SERVICES_DATA); |
| 106 | |
| 107 | /* |
| 108 | * Calling apply_to_page_range() is only safe on regions that are |
| 109 | * guaranteed to be mapped down to pages. Since we are only called |
| 110 | * for regions that have been mapped using efi_create_mapping() above |
| 111 | * (and this is checked by the generic Memory Attributes table parsing |
| 112 | * routines), there is no need to check that again here. |
| 113 | */ |
| 114 | return apply_to_page_range(mm, md->virt_addr, |
| 115 | md->num_pages << EFI_PAGE_SHIFT, |
| 116 | set_permissions, md); |
| 117 | } |
| 118 | |
/*
 * UpdateCapsule() depends on the system being shutdown via
 * ResetSystem(), so report that EFI-based poweroff is required
 * whenever the runtime services are available.
 */
bool efi_poweroff_required(void)
{
	return efi_enabled(EFI_RUNTIME_SERVICES);
}