// SPDX-License-Identifier: GPL-2.0+
/*
 * PowerPC Memory Protection Keys management
 *
 * Copyright 2017, Ram Pai, IBM Corporation.
 */

#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/setup.h>
#include <linux/pkeys.h>
#include <linux/of_fdt.h>

int num_pkey; /* Max number of pkeys supported */
/*
 * Keys marked in the reservation list cannot be allocated by userspace
 */
u32 reserved_allocation_mask __ro_after_init;

/* Bits set for the initially allocated keys */
static u32 initial_allocation_mask __ro_after_init;

/*
 * Even if we allocate keys with sys_pkey_alloc(), we need to make sure
 * other threads still find access denied when using the same keys.
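 *
 * A default_amr of all ones denies read and write for every key, and
 * default_iamr of 0x5555... sets the execute-disable bit (IAMR_EX_BIT)
 * in every 2-bit field, denying execute for every key.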
 */
static u64 default_amr = ~0x0UL;
static u64 default_iamr = 0x5555555555555555UL;

/* Allow all keys to be modified by default */
static u64 default_uamor = ~0x0UL;
/*
 * Key used to implement PROT_EXEC mmap. Denies READ/WRITE.
 * We pick key 2 because 0 is a special key and 1 is reserved as per the ISA.
 */
static int execute_only_key = 2;
static bool pkey_execute_disable_supported;

#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
#define AMR_WR_BIT 0x2UL
#define IAMR_EX_BIT 0x1UL
#define PKEY_REG_BITS (sizeof(u64) * 8)
#define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey+1) * AMR_BITS_PER_PKEY))
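/*
 * Each key owns two adjacent bits counted down from the most-significant
 * end of the 64-bit register: pkeyshift(0) == 62 and pkeyshift(2) == 58,
 * so key 2's AMR bits are bits 58 and 59 (bit 0 being the LSB).
 */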

static int __init dt_scan_storage_keys(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int *pkeys_total = (int *) data;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-storage-keys", NULL);
	if (!prop)
		return 0;
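	/*
	 * The first cell of "ibm,processor-storage-keys" gives the number
	 * of keys available for data access; a second cell for instruction
	 * access exists but is not used here (assumption based on the PAPR
	 * description of this property).
	 */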
	*pkeys_total = be32_to_cpu(prop[0]);
	return 1;
}

static int scan_pkey_feature(void)
{
	int ret;
	int pkeys_total = 0;

	/*
	 * Pkeys are not supported with Radix translation.
	 */
	if (early_radix_enabled())
		return 0;

	ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total);
	if (ret == 0) {
		/*
		 * Let's assume 32 pkeys on P8/P9 bare metal, if it is not defined
		 * by the device tree. We make this exception since some versions
		 * of skiboot forgot to expose this property on power8/9.
		 */
		if (!firmware_has_feature(FW_FEATURE_LPAR)) {
			unsigned long pvr = mfspr(SPRN_PVR);

			if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
			    PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
				pkeys_total = 32;
		}
	}

	/*
	 * Adjust the upper limit, based on the number of bits supported by
	 * arch-neutral code.
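	 * With the five VM_PKEY_BIT* vma flags this architecture defines
	 * (an assumption about ARCH_VM_PKEY_FLAGS, which is not shown here),
	 * the expression below works out to a cap of 32 keys.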
	 */
	pkeys_total = min_t(int, pkeys_total,
			    ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1));
	return pkeys_total;
}

void __init pkey_early_init_devtree(void)
{
	int pkeys_total, i;

	/*
	 * We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral
	 * generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE.
	 * Ensure that the bits are distinct.
	 */
	BUILD_BUG_ON(PKEY_DISABLE_EXECUTE &
		     (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));

	/*
	 * pkey_to_vmflag_bits() assumes that the pkey bits are contiguous
	 * in the vma flags. Make sure that is really the case.
	 */
	BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) +
		     __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)
		     != (sizeof(u64) * BITS_PER_BYTE));

	/* scan the device tree for pkey feature */
	pkeys_total = scan_pkey_feature();
	if (!pkeys_total) {
		/* No support for pkey. Mark it disabled */
		return;
	}

	cur_cpu_spec->mmu_features |= MMU_FTR_PKEY;

	/*
	 * The device tree cannot be relied upon to indicate support for
	 * execute_disable. Instead we use a PVR check.
	 */
	if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p))
		pkey_execute_disable_supported = false;
	else
		pkey_execute_disable_supported = true;

#ifdef CONFIG_PPC_4K_PAGES
	/*
	 * The OS can manage only 8 pkeys due to its inability to represent them
	 * in the Linux 4K PTE. Mark all other keys reserved.
	 */
	num_pkey = min(8, pkeys_total);
#else
	num_pkey = pkeys_total;
#endif

	if (unlikely(num_pkey <= execute_only_key) || !pkey_execute_disable_supported) {
		/*
		 * Insufficient number of keys to support an execute-only
		 * key, or execute-disable is not supported. Mark it
		 * unavailable.
		 */
		execute_only_key = -1;
	} else {
		/*
		 * Mark the execute_only_pkey as not available for
		 * user allocation via pkey_alloc.
		 */
		reserved_allocation_mask |= (0x1 << execute_only_key);

		/*
		 * Deny READ/WRITE for execute_only_key.
		 * Allow execute in IAMR.
		 */
		default_amr |= (0x3ul << pkeyshift(execute_only_key));
		default_iamr &= ~(0x1ul << pkeyshift(execute_only_key));

		/*
		 * Clear the uamor bits for this key.
		 */
		default_uamor &= ~(0x3ul << pkeyshift(execute_only_key));
	}

	/*
	 * Allow read/write/execute for key 0 and clear its UAMOR bits so
	 * userspace cannot modify its permissions.
	 */
	default_amr &= ~(0x3ul << pkeyshift(0));
	default_iamr &= ~(0x1ul << pkeyshift(0));
	default_uamor &= ~(0x3ul << pkeyshift(0));
	/*
	 * Key 0 is special in that we want to consider it an allocated,
	 * preallocated key. We don't allow changing the AMR bits of key 0,
	 * but one can still pkey_free() key 0.
	 */
	initial_allocation_mask |= (0x1 << 0);

	/*
	 * Key 1 is recommended not to be used (Power ISA 3.0, page 1015,
	 * programming note).
	 */
	reserved_allocation_mask |= (0x1 << 1);
	default_uamor &= ~(0x3ul << pkeyshift(1));

	/*
	 * Prevent the usage of OS reserved keys. Update UAMOR
	 * for those keys. Also mark the rest of the bits in the
	 * 32 bit mask as reserved.
	 */
	for (i = num_pkey; i < 32; i++) {
		reserved_allocation_mask |= (0x1 << i);
		default_uamor &= ~(0x3ul << pkeyshift(i));
	}
	/*
	 * Prevent the allocation of reserved keys too.
	 */
	initial_allocation_mask |= reserved_allocation_mask;

	return;
}

void pkey_mm_init(struct mm_struct *mm)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;
	mm_pkey_allocation_map(mm) = initial_allocation_mask;
	mm->context.execute_only_pkey = execute_only_key;
}

static inline u64 read_amr(void)
{
	return mfspr(SPRN_AMR);
}

static inline void write_amr(u64 value)
{
	mtspr(SPRN_AMR, value);
}

static inline u64 read_iamr(void)
{
	if (!likely(pkey_execute_disable_supported))
		return 0x0UL;

	return mfspr(SPRN_IAMR);
}

static inline void write_iamr(u64 value)
{
	if (!likely(pkey_execute_disable_supported))
		return;

	mtspr(SPRN_IAMR, value);
}

static inline u64 read_uamor(void)
{
	return mfspr(SPRN_UAMOR);
}

static inline void write_uamor(u64 value)
{
	mtspr(SPRN_UAMOR, value);
}

static bool is_pkey_enabled(int pkey)
{
	u64 uamor = read_uamor();
	u64 pkey_bits = 0x3ul << pkeyshift(pkey);
	u64 uamor_pkey_bits = (uamor & pkey_bits);

	/*
	 * Both of the UAMOR bits corresponding to the key should be set, or
	 * both should be clear.
	 */
	WARN_ON(uamor_pkey_bits && (uamor_pkey_bits != pkey_bits));
	return !!(uamor_pkey_bits);
}

static inline void init_amr(int pkey, u8 init_bits)
{
	u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
	u64 old_amr = read_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));

	write_amr(old_amr | new_amr_bits);
}

static inline void init_iamr(int pkey, u8 init_bits)
{
	u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey));
	u64 old_iamr = read_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));

	write_iamr(old_iamr | new_iamr_bits);
}

/*
 * Set the access rights in the AMR and IAMR registers for @pkey to those
 * specified in @init_val.
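 *
 * For example, an @init_val of PKEY_DISABLE_WRITE sets only AMR_WR_BIT
 * for @pkey, while PKEY_DISABLE_ACCESS sets both AMR_RD_BIT and
 * AMR_WR_BIT.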
 */
int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
				unsigned long init_val)
{
	u64 new_amr_bits = 0x0ul;
	u64 new_iamr_bits = 0x0ul;

	if (!is_pkey_enabled(pkey))
		return -EINVAL;

	if (init_val & PKEY_DISABLE_EXECUTE) {
		if (!pkey_execute_disable_supported)
			return -EINVAL;
		new_iamr_bits |= IAMR_EX_BIT;
	}
	init_iamr(pkey, new_iamr_bits);

	/* Set the bits we need in AMR: */
	if (init_val & PKEY_DISABLE_ACCESS)
		new_amr_bits |= AMR_RD_BIT | AMR_WR_BIT;
	else if (init_val & PKEY_DISABLE_WRITE)
		new_amr_bits |= AMR_WR_BIT;

	init_amr(pkey, new_amr_bits);
	return 0;
}

void thread_pkey_regs_save(struct thread_struct *thread)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	/*
	 * TODO: Skip saving registers if @thread hasn't used any keys yet.
	 */
	thread->amr = read_amr();
	thread->iamr = read_iamr();
	thread->uamor = read_uamor();
}

void thread_pkey_regs_restore(struct thread_struct *new_thread,
			      struct thread_struct *old_thread)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (old_thread->amr != new_thread->amr)
		write_amr(new_thread->amr);
	if (old_thread->iamr != new_thread->iamr)
		write_iamr(new_thread->iamr);
	if (old_thread->uamor != new_thread->uamor)
		write_uamor(new_thread->uamor);
}

void thread_pkey_regs_init(struct thread_struct *thread)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	thread->amr = default_amr;
	thread->iamr = default_iamr;
	thread->uamor = default_uamor;

	write_amr(default_amr);
	write_iamr(default_iamr);
	write_uamor(default_uamor);
}

int execute_only_pkey(struct mm_struct *mm)
{
	return mm->context.execute_only_pkey;
}

static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
{
	/* Do this check first since the vm_flags should be hot */
	if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
		return false;

	return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
}

/*
 * This should only be called for *plain* mprotect calls.
 */
int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot,
				  int pkey)
{
	/*
	 * If the currently associated pkey is execute-only, but the requested
	 * protection is not execute-only, move it back to the default pkey.
	 */
	if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC))
		return 0;

	/*
	 * The requested protection is execute-only. Hence let's use an
	 * execute-only pkey.
	 */
	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(vma->vm_mm);
		if (pkey > 0)
			return pkey;
	}

	/* Nothing to override. */
	return vma_pkey(vma);
}

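/*
 * A set bit in the AMR/IAMR denies the corresponding access, so an
 * access is permitted only when the relevant bit for the key is clear.
 */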
static bool pkey_access_permitted(int pkey, bool write, bool execute)
{
	int pkey_shift;
	u64 amr;

	pkey_shift = pkeyshift(pkey);
	if (execute)
		return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));

	amr = read_amr();
	if (write)
		return !(amr & (AMR_WR_BIT << pkey_shift));

	return !(amr & (AMR_RD_BIT << pkey_shift));
}

bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return true;

	return pkey_access_permitted(pte_to_pkey_bits(pte), write, execute);
}

/*
 * We only want to enforce protection keys on the current thread because we
 * effectively have no access to AMR/IAMR for other threads or any way to tell
 * which AMR/IAMR in a threaded process we could use.
 *
 * So do not enforce things if the VMA is not from the current mm, or if we are
 * in a kernel thread.
 */
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return true;
	/*
	 * Do not enforce our key-permissions on a foreign vma.
	 */
	if (foreign || vma_is_foreign(vma))
		return true;

	return pkey_access_permitted(vma_pkey(vma), write, execute);
}

void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
454}