/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/const.h>
#include <linux/sizes.h>
#include <asm/page-def.h>

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M
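
/*
 * Illustrative note: with PCI_IO_SIZE = SZ_16M, an IO_SPACE_LIMIT of
 * PCI_IO_SIZE - 1 = 0x00ffffff keeps exactly the low 24 bits of a port
 * number, so masking with it can never produce an offset outside the
 * [PCI_IO_START, PCI_IO_END) window; a non-power-of-two size would break
 * that masking property.
 */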

/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 *
 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
 * needs to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
 * keep a constant PAGE_OFFSET and "fall back" to using the higher end
 * of the VMEMMAP where 52-bit support is not available in hardware.
 */
#define VMEMMAP_SHIFT		(PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE		((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) >> VMEMMAP_SHIFT)
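
/*
 * Worked example (illustrative): with 4K pages, PAGE_SHIFT = 12 and
 * STRUCT_PAGE_MAX_SHIFT is usually 6 (struct page no larger than 64 bytes),
 * so VMEMMAP_SHIFT = 6 and the vmemmap consumes 1/64th of the linear
 * region: one 64-byte struct page for every 4K of mapped memory.
 */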

/*
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
 *               start of the TTBR1 address space.
 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
#define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR		(MODULES_END)
#define BPF_JIT_REGION_START	(_PAGE_END(VA_BITS_MIN))
#define BPF_JIT_REGION_SIZE	(SZ_128M)
#define BPF_JIT_REGION_END	(BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(BPF_JIT_REGION_END)
#define MODULES_VSIZE		(SZ_128M)
#define VMEMMAP_START		(-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
#define VMEMMAP_END		(VMEMMAP_START + VMEMMAP_SIZE)
#define PCI_IO_END		(VMEMMAP_START - SZ_8M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(VMEMMAP_START - SZ_32M)

#if VA_BITS > 48
#define VA_BITS_MIN		(48)
#else
#define VA_BITS_MIN		(VA_BITS)
#endif

#define _PAGE_END(va)		(-(UL(1) << ((va) - 1)))
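
/*
 * Worked example (illustrative, VA_BITS = VA_BITS_MIN = 48): PAGE_OFFSET =
 * _PAGE_OFFSET(48) = 0xffff000000000000 and _PAGE_END(48) =
 * 0xffff800000000000, so the linear map occupies the lower half of the
 * TTBR1 address space, with the BPF JIT region, modules and the kernel
 * image following on from _PAGE_END(VA_BITS_MIN).
 */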

#define KERNEL_START		_text
#define KERNEL_END		_end

/*
 * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
 * address space for the shadow region respectively. They can bloat the stack
 * significantly, so double the (minimum) stack size when they are in use.
 */
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
					+ KASAN_SHADOW_OFFSET)
#define PAGE_END		(KASAN_SHADOW_END - (1UL << (vabits_actual - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_THREAD_SHIFT	0
#define PAGE_END		(_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
 * stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)
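
/*
 * Worked example (illustrative): without KASAN, MIN_THREAD_SHIFT = 14, so a
 * 4K-page kernel uses 16K stacks (THREAD_SIZE_ORDER = 2). With 64K pages and
 * CONFIG_VMAP_STACK, THREAD_SHIFT is rounded up to PAGE_SHIFT = 16 and each
 * stack is a single 64K page.
 */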

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif
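
/*
 * Illustrative arithmetic: with THREAD_SHIFT = 14 a VMAP'd stack base is
 * 32K aligned, so every valid sp in [base, base + 16K) has bit 14 clear,
 * while an sp that has overflowed into the 16K immediately below the stack
 * has bit 14 set - a single bit test is enough to spot the overflow on
 * exception entry.
 */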

#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K

/*
 * Alignment of kernel segments (e.g. .text, .data).
 *
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K
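
/*
 * Illustrative arithmetic for the table above: 16 * 4K = 4 * 16K = 1 * 64K
 * = SZ_64K, so a 64K segment alignment lets every granule size map the
 * kernel segments with the mapping unit listed for it.
 */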

/*
 * Memory types available.
 *
 * IMPORTANT: MT_NORMAL must be index 0 since vm_get_page_prot() may 'or' in
 *	      the MT_NORMAL_TAGGED memory type for PROT_MTE mappings. Note
 *	      that protection_map[] only contains MT_NORMAL attributes.
 */
#define MT_NORMAL		0
#define MT_NORMAL_TAGGED	1
#define MT_NORMAL_NC		2
#define MT_NORMAL_WT		3
#define MT_DEVICE_nGnRnE	4
#define MT_DEVICE_nGnRE		5
#define MT_DEVICE_GRE		6

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1

/*
 * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0b0001.
 * Stage-2 then enforces Normal-WB and Device-nGnRE.
 */
#define MT_S2_FWB_NORMAL	6
#define MT_S2_FWB_DEVICE_nGnRE	1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif
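
/*
 * Illustrative only (values depend on page size and translation levels):
 * with 4K pages PUD_SHIFT is 30, so ioremap() may build block mappings of
 * up to 1GB; with 16K or 64K pages the limit is one PMD-sized block
 * (32MB or 512MB respectively).
 */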

/*
 * Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated
 * until link time.
 */
#define RESERVED_SWAPPER_OFFSET	(PAGE_SIZE)

/*
 * Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated
 * until link time.
 */
#define TRAMP_SWAPPER_OFFSET	(2 * PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/mmdebug.h>
#include <linux/types.h>
#include <asm/bug.h>

extern u64			vabits_actual;

extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })

/* the virtual base of the kernel image */
extern u64			kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
}
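
/*
 * Illustrative note: with KASLR disabled (or no usable seed), kimage_vaddr
 * keeps its link-time value of KIMAGE_VADDR and kaslr_offset() returns 0;
 * otherwise it returns the randomized displacement applied to the kernel
 * image mapping.
 */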

/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define __untagged_addr(addr)	\
	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))

#define untagged_addr(addr)	({					\
	u64 __addr = (__force u64)(addr);				\
	__addr &= __untagged_addr(__addr);				\
	(__force __typeof__(addr))__addr;				\
})
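
/*
 * Worked example (illustrative): for a TBI user pointer such as
 * 0x1200ffffa8000000 (tag 0x12 in bits 63:56, bit 55 clear),
 * sign_extend64(..., 55) yields 0x0000ffffa8000000, so the AND in
 * untagged_addr() strips the tag. Kernel addresses have bit 55 set, so the
 * same operation sign-extends to all-ones above bit 55 and leaves them
 * untouched.
 */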

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define __tag_shifted(tag)	((u64)(tag) << 56)
#define __tag_reset(addr)	__untagged_addr(addr)
#define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
#else
#define __tag_shifted(tag)	0UL
#define __tag_reset(addr)	(addr)
#define __tag_get(addr)		0
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline const void *__tag_set(const void *addr, u8 tag)
{
	u64 __addr = (u64)addr & ~__tag_shifted(0xff);
	return (const void *)(__addr | __tag_shifted(tag));
}
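
/*
 * Illustrative usage (hypothetical caller, not part of this header):
 *
 *	ptr = __tag_set(ptr, tag);
 *
 * replaces bits 63:56 of ptr with an 8-bit tag when one of the tag-based
 * KASAN modes is enabled, and degenerates to returning ptr unchanged when
 * neither is configured (__tag_shifted() is then 0UL).
 */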

#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging()			mte_enable_kernel()
#define arch_set_tagging_report_once(state)	mte_set_report_once(state)
#define arch_init_tags(max_tag)			mte_init_tags(max_tag)
#define arch_get_random_tag()			mte_get_random_tag()
#define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
#define arch_set_mem_tag_range(addr, size, tag)	\
			mte_set_mem_tag_range((addr), (size), (tag))
#endif /* CONFIG_KASAN_HW_TAGS */

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * Check whether an arbitrary address is within the linear map, which
 * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
 * kernel's TTBR1 address range.
 */
#define __is_lm_address(addr)	(((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

#define __lm_to_phys(addr)	(((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
	__is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x);	\
})
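
/*
 * Illustrative only: a linear-map pointer (e.g. one returned by kmalloc())
 * falls inside [PAGE_OFFSET, PAGE_END) and takes the __lm_to_phys() path
 * (subtract PAGE_OFFSET, add PHYS_OFFSET); a kernel-image symbol address
 * falls outside that interval and is translated by subtracting
 * kimage_voffset instead.
 */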

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
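
/*
 * Illustrative reasoning: for any linear-mapped physical address,
 * (x) - PHYS_OFFSET is smaller than the linear region and therefore has
 * all the bits of PAGE_OFFSET clear, so the bitwise OR above is equivalent
 * to adding the constant PAGE_OFFSET.
 */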

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Note: Drivers should NOT use these. They are the wrong
 * translation for DMA addresses. Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

/*
 *  virt_to_page(x)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(x)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
#define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})

#define virt_to_page(x)	({						\
	u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;	\
	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
	(struct page *)__addr;						\
})
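
/*
 * Illustrative round trip: the vmemmap places struct pages at VMEMMAP_START
 * in the same order as their pages appear in the linear map, so the two
 * macros above are inverses and page_to_virt(virt_to_page(p)) recovers the
 * page-aligned linear address of p, with the pointer tag taken from
 * page_kasan_tag().
 */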
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */

#define virt_addr_valid(addr)	({					\
	__typeof__(addr) __addr = __tag_reset(addr);			\
	__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));	\
})
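
/*
 * Illustrative usage (hypothetical caller): code that may be handed either
 * a linear-map pointer or a vmalloc()/module address can guard struct page
 * lookups with
 *
 *	if (virt_addr_valid(ptr))
 *		page = virt_to_page(ptr);
 *
 * since only linear-map addresses backed by a valid pfn pass the check.
 */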

void dump_mem_limit(void);
#endif /* !__ASSEMBLY__ */

/*
 * Given that the GIC architecture permits ITS implementations that can only be
 * configured with an LPI table address once, GICv3 systems with many CPUs may
 * end up reserving a lot of different regions after a kexec for their LPI
 * tables (one per CPU), as we are forced to reuse the same memory after kexec
 * (and thus reserve it persistently with EFI beforehand).
 */
#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
# define INIT_MEMBLOCK_RESERVED_REGIONS	(INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
#endif

#include <asm-generic/memory_model.h>

#endif /* __ASM_MEMORY_H */