// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
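
/*
 * Worked example (values for illustration only): with 16-bit ASIDs,
 * i.e. asid_bits == 16, the macros above evaluate to:
 *
 *	ASID_MASK		== ~0xffff
 *	ASID_FIRST_VERSION	== 0x10000
 *	NUM_USER_ASIDS		== 0x10000
 *
 * A context.id of 0x3beef then splits into generation 0x30000 (the
 * bits under ASID_MASK) and hardware ASID 0xbeef, and
 * asid2idx(0x3beef) == 0xbeef indexes the allocation bitmap.
 */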

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						       ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}
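
/*
 * For example, a CPU whose ID_AA64MMFR0_EL1 reads 0x1122 (a made-up
 * value) has ASIDBits, the field at bits [7:4] selected by
 * ID_AA64MMFR0_ASID_SHIFT, equal to 2, so get_cpu_asid_bits() returns
 * 16; a field value of 0 means only 8 ASID bits are implemented.
 */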

/* Check if the current CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we support
		 * fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
			smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(void)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * With KPTI, kernel and user ASIDs are allocated in pairs; the
	 * bottom bit distinguishes the two: if it is set, the ASID maps
	 * only userspace. Setting every odd bit in the map therefore
	 * reserves the user-only half of each pair, so the allocator
	 * only ever hands out the even ASIDs.
	 */
	memset(asid_map, 0xaa, len);
}
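
/*
 * Concretely: 0xaa is 0b10101010, so the memset() above sets every
 * odd-numbered bit in asid_map and find_next_zero_bit() can only
 * return even indices (2, 4, 6, ...). Each allocated ASID thereby
 * implicitly owns its odd partner (asid | 1), the user-only ASID that
 * the KPTI entry trampoline switches to on kernel exit.
 */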

static void set_reserved_asid_bits(void)
{
	if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits();
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on the next
	 * context switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}
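
/*
 * In other words, the relaxed xchg above does double duty: it samples
 * the ASID each CPU was last running with, and it zeroes active_asids
 * so that a concurrent check_and_switch_context() sees 0 from its
 * cmpxchg and falls back to the locked slow path.
 */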

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}
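
/*
 * Example (16-bit ASIDs assumed): if CPU0 and CPU1 both hold
 * reserved_asids == 0x1beef when the owning task returns after a
 * rollover to generation 0x20000, new_context() passes
 * newasid == 0x2beef and the loop above rewrites *both* per-cpu slots
 * before returning true, so no copy of the stale generation survives.
 */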

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always begin searching from index 1, as ASID #0 is used for the
	 * reserved TTBR0 of the init_mm, and with KPTI the odd user ASIDs
	 * are already marked in the map, so allocation proceeds in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
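
/*
 * A rough trace of the slow path, again assuming 16-bit ASIDs: once
 * the bitmap search from cur_idx comes up empty, the generation bumps
 * from e.g. 0x10000 to 0x20000, flush_context() re-seeds the bitmap
 * with only the reserved per-cpu ASIDs and queues a TLB flush on
 * every CPU, and the second find_next_zero_bit() from index 1 is
 * guaranteed to succeed because NUM_USER_ASIDS far exceeds the number
 * of possible CPUs.
 */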

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}
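
/*
 * The generation check is plain bit arithmetic. With asid_bits == 16,
 * an mm whose context.id is 0x2beef under asid_generation 0x20000
 * gives (0x2beef ^ 0x20000) >> 16 == 0, so the relaxed-cmpxchg
 * fastpath may be taken; after a rollover to 0x30000 the same XOR
 * yields 0x1beef, whose shifted value (0x1beef >> 16 == 1) is
 * non-zero and forces the slow path under cpu_asid_lock into
 * new_context() for a fresh ASID.
 */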

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}

static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0())
		num_available_asids /= 2;
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
	pr_info("ASID allocator initialised with %lu entries\n",
		num_available_asids);
	return 0;
}
arch_initcall(asids_update_limit);
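
/*
 * For example, with 16-bit ASIDs and KPTI enabled this reports 32768
 * entries (65536 / 2, since each mm consumes an even/odd pair);
 * without KPTI the full 65536 are available. The WARN_ON() only fires
 * if the possible-CPU count approaches the usable ASID space.
 */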

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve the kernel ASIDs from the start.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits();
	return 0;
}
early_initcall(asids_init);