// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>

Anshuman Khandual | 8b1e0f8 | 2019-07-11 20:58:43 -0700 | [diff] [blame] | 11 | static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) |
Ard Biesheuvel | 9fc68b7 | 2016-04-25 21:06:42 +0100 | [diff] [blame] | 12 | { |
| 13 | efi_memory_desc_t *md = data; |
| 14 | pte_t pte = *ptep; |
| 15 | |
| 16 | if (md->attribute & EFI_MEMORY_RO) |
| 17 | pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY)); |
| 18 | if (md->attribute & EFI_MEMORY_XP) |
| 19 | pte = set_pte_bit(pte, __pgprot(L_PTE_XN)); |
| 20 | set_pte_ext(ptep, pte, PTE_EXT_NG); |
| 21 | return 0; |
| 22 | } |
| 23 | |
| 24 | int __init efi_set_mapping_permissions(struct mm_struct *mm, |
| 25 | efi_memory_desc_t *md) |
| 26 | { |
| 27 | unsigned long base, size; |
| 28 | |
| 29 | base = md->virt_addr; |
| 30 | size = md->num_pages << EFI_PAGE_SHIFT; |
| 31 | |
| 32 | /* |
| 33 | * We can only use apply_to_page_range() if we can guarantee that the |
| 34 | * entire region was mapped using pages. This should be the case if the |
| 35 | * region does not cover any naturally aligned SECTION_SIZE sized |
| 36 | * blocks. |
| 37 | */ |
| 38 | if (round_down(base + size, SECTION_SIZE) < |
| 39 | round_up(base, SECTION_SIZE) + SECTION_SIZE) |
| 40 | return apply_to_page_range(mm, base, size, set_permissions, md); |
| 41 | |
| 42 | return 0; |
| 43 | } |
| 44 | |
Ard Biesheuvel | da58fb6 | 2015-09-24 13:49:52 -0700 | [diff] [blame] | 45 | int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) |
| 46 | { |
| 47 | struct map_desc desc = { |
| 48 | .virtual = md->virt_addr, |
| 49 | .pfn = __phys_to_pfn(md->phys_addr), |
| 50 | .length = md->num_pages * EFI_PAGE_SIZE, |
| 51 | }; |
| 52 | |
| 53 | /* |
| 54 | * Order is important here: memory regions may have all of the |
| 55 | * bits below set (and usually do), so we check them in order of |
| 56 | * preference. |
| 57 | */ |
| 58 | if (md->attribute & EFI_MEMORY_WB) |
| 59 | desc.type = MT_MEMORY_RWX; |
| 60 | else if (md->attribute & EFI_MEMORY_WT) |
| 61 | desc.type = MT_MEMORY_RWX_NONCACHED; |
| 62 | else if (md->attribute & EFI_MEMORY_WC) |
| 63 | desc.type = MT_DEVICE_WC; |
| 64 | else |
| 65 | desc.type = MT_DEVICE; |
| 66 | |
| 67 | create_mapping_late(mm, &desc, true); |
Ard Biesheuvel | 9fc68b7 | 2016-04-25 21:06:42 +0100 | [diff] [blame] | 68 | |
| 69 | /* |
| 70 | * If stricter permissions were specified, apply them now. |
| 71 | */ |
| 72 | if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP)) |
| 73 | return efi_set_mapping_permissions(mm, md); |
Ard Biesheuvel | da58fb6 | 2015-09-24 13:49:52 -0700 | [diff] [blame] | 74 | return 0; |
| 75 | } |