// SPDX-License-Identifier: GPL-2.0+
/*
 * PowerPC Memory Protection Keys management
 *
 * Copyright 2017, Ram Pai, IBM Corporation.
 */

#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/setup.h>
#include <linux/pkeys.h>
#include <linux/of_fdt.h>

int num_pkey; /* Max number of pkeys supported */
/*
 * Keys marked in the reservation list cannot be allocated by userspace.
 */
u32 reserved_allocation_mask __ro_after_init;

/* Bits set for the initially allocated keys */
static u32 initial_allocation_mask __ro_after_init;

/*
 * Even if we allocate keys with sys_pkey_alloc(), we need to make sure
 * other threads still find access denied when using the same keys.
 */
static u64 default_amr = ~0x0UL;
static u64 default_iamr = 0x5555555555555555UL;
u64 default_uamor __ro_after_init;
/*
 * Key used to implement PROT_EXEC mmap. Denies READ/WRITE.
 * We pick key 2 because 0 is a special key and 1 is reserved as per the ISA.
 */
static int execute_only_key = 2;
static bool pkey_execute_disable_supported;


#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
#define AMR_WR_BIT 0x2UL
#define IAMR_EX_BIT 0x1UL
#define PKEY_REG_BITS (sizeof(u64) * 8)
#define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey+1) * AMR_BITS_PER_PKEY))
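
/*
 * Worked example (illustrative): with PKEY_REG_BITS = 64 and
 * AMR_BITS_PER_PKEY = 2, pkeyshift(2) = 64 - (3 * 2) = 58, so key 2's
 * read/write-disable bits sit at AMR bits 59:58 (counting LSB as bit 0)
 * and its execute-disable bit at IAMR bit 58.
 */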

static int __init dt_scan_storage_keys(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be32 *prop;
        int *pkeys_total = (int *) data;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,processor-storage-keys", NULL);
        if (!prop)
                return 0;
        *pkeys_total = be32_to_cpu(prop[0]);
        return 1;
}

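/*
 * Returns the number of storage keys available to the OS: the value of the
 * "ibm,processor-storage-keys" device tree property (or the bare-metal
 * P8/P9 fallback below), capped at the number of keys the arch-neutral
 * VM_PKEY flag bits can represent. Returns 0 when pkeys are not available,
 * e.g. under Radix translation.
 */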
static int scan_pkey_feature(void)
{
        int ret;
        int pkeys_total = 0;

        /*
         * Pkeys are not supported with Radix translation.
         */
        if (early_radix_enabled())
                return 0;

        ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total);
        if (ret == 0) {
                /*
                 * Assume 32 pkeys on P8/P9 bare metal if the device tree does
                 * not define the property. We make this exception because some
                 * versions of skiboot forgot to expose it on POWER8/9.
                 */
                if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                        unsigned long pvr = mfspr(SPRN_PVR);

                        if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
                            PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
                                pkeys_total = 32;
                }
        }

        /*
         * Adjust the upper limit based on the number of bits supported by
         * the arch-neutral code.
         */
        pkeys_total = min_t(int, pkeys_total,
                            ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1));
        return pkeys_total;
}

void __init pkey_early_init_devtree(void)
{
        int pkeys_total, i;

        /*
         * We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral
         * generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE.
         * Ensure that the bits are distinct.
         */
        BUILD_BUG_ON(PKEY_DISABLE_EXECUTE &
                     (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));

        /*
         * pkey_to_vmflag_bits() assumes that the pkey bits are contiguous
         * in the vma flags. Make sure that is really the case.
         */
        BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) +
                     __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)
                                != (sizeof(u64) * BITS_PER_BYTE));

        /* Scan the device tree for the pkey feature */
        pkeys_total = scan_pkey_feature();
        if (!pkeys_total)
                goto out;

        /* Allow all keys to be modified by default */
        default_uamor = ~0x0UL;

        cur_cpu_spec->mmu_features |= MMU_FTR_PKEY;

        /*
         * The device tree cannot be relied upon to indicate execute_disable
         * support. Instead we use a PVR check.
         */
        if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p))
                pkey_execute_disable_supported = false;
        else
                pkey_execute_disable_supported = true;

#ifdef CONFIG_PPC_4K_PAGES
        /*
         * The OS can manage only 8 pkeys due to its inability to represent
         * them in the Linux 4K PTE. Mark all other keys reserved.
         */
        num_pkey = min(8, pkeys_total);
#else
        num_pkey = pkeys_total;
#endif

        if (unlikely(num_pkey <= execute_only_key) || !pkey_execute_disable_supported) {
                /*
                 * Insufficient number of keys to support
                 * execute only key. Mark it unavailable.
                 */
                execute_only_key = -1;
        } else {
                /*
                 * Mark the execute_only_pkey as not available for
                 * user allocation via pkey_alloc.
                 */
                reserved_allocation_mask |= (0x1 << execute_only_key);

                /*
                 * Deny READ/WRITE for execute_only_key.
                 * Allow execute in IAMR.
                 */
                default_amr |= (0x3ul << pkeyshift(execute_only_key));
                default_iamr &= ~(0x1ul << pkeyshift(execute_only_key));

                /*
                 * Clear the UAMOR bits for this key.
                 */
                default_uamor &= ~(0x3ul << pkeyshift(execute_only_key));
        }

        /*
         * Allow access only for key 0, and prevent any other modification.
         */
        default_amr &= ~(0x3ul << pkeyshift(0));
        default_iamr &= ~(0x1ul << pkeyshift(0));
        default_uamor &= ~(0x3ul << pkeyshift(0));
        /*
         * Key 0 is special in that we want to consider it an allocated,
         * preallocated key. We don't allow changing the AMR bits w.r.t.
         * key 0, but one can still pkey_free(key 0).
         */
        initial_allocation_mask |= (0x1 << 0);

        /*
         * Key 1 is recommended not to be used; see the PowerISA (3.0)
         * page 1015 programming note.
         */
        reserved_allocation_mask |= (0x1 << 1);
        default_uamor &= ~(0x3ul << pkeyshift(1));

        /*
         * Prevent the usage of OS reserved keys and update UAMOR for
         * those keys. Also mark the rest of the bits in the 32-bit mask
         * as reserved.
         */
        for (i = num_pkey; i < 32; i++) {
                reserved_allocation_mask |= (0x1 << i);
                default_uamor &= ~(0x3ul << pkeyshift(i));
        }
        /*
         * Prevent the allocation of reserved keys too.
         */
        initial_allocation_mask |= reserved_allocation_mask;

        pr_info("Enabling pkeys with max key count %d\n", num_pkey);
out:
        /*
         * Set up UAMOR on the boot CPU.
         */
        mtspr(SPRN_UAMOR, default_uamor);

        return;
}

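/*
 * Seed a new mm's pkey state: start from the boot-time allocation mask
 * (which already accounts for reserved keys and the execute-only key) and
 * record which key backs execute-only mappings for this mm.
 */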
void pkey_mm_init(struct mm_struct *mm)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;
        mm_pkey_allocation_map(mm) = initial_allocation_mask;
        mm->context.execute_only_pkey = execute_only_key;
}

static inline u64 read_amr(void)
{
        return mfspr(SPRN_AMR);
}

static inline void write_amr(u64 value)
{
        mtspr(SPRN_AMR, value);
}

static inline u64 read_iamr(void)
{
        if (!likely(pkey_execute_disable_supported))
                return 0x0UL;

        return mfspr(SPRN_IAMR);
}

static inline void write_iamr(u64 value)
{
        if (!likely(pkey_execute_disable_supported))
                return;

        mtspr(SPRN_IAMR, value);
}

static inline u64 read_uamor(void)
{
        return mfspr(SPRN_UAMOR);
}

static bool is_pkey_enabled(int pkey)
{
        u64 uamor = read_uamor();
        u64 pkey_bits = 0x3ul << pkeyshift(pkey);
        u64 uamor_pkey_bits = (uamor & pkey_bits);

        /*
         * Both the bits in UAMOR corresponding to the key should be set or
         * reset.
         */
        WARN_ON(uamor_pkey_bits && (uamor_pkey_bits != pkey_bits));
        return !!(uamor_pkey_bits);
}

static inline void init_amr(int pkey, u8 init_bits)
{
        u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
        u64 old_amr = read_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));

        write_amr(old_amr | new_amr_bits);
}

static inline void init_iamr(int pkey, u8 init_bits)
{
        u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey));
        u64 old_iamr = read_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));

        write_iamr(old_iamr | new_iamr_bits);
}
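
/*
 * Example (illustrative, key 5 is arbitrary): init_amr(5, AMR_WR_BIT) clears
 * key 5's two AMR bits and sets only the write-disable bit, leaving data
 * tagged with key 5 readable but not writable by the current thread;
 * init_amr(5, 0) restores full access for that key.
 */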

/*
 * Set the access rights in the AMR and IAMR registers for @pkey to those
 * specified in @init_val.
 */
int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
                                unsigned long init_val)
{
        u64 new_amr_bits = 0x0ul;
        u64 new_iamr_bits = 0x0ul;

        if (!is_pkey_enabled(pkey))
                return -EINVAL;

        if (init_val & PKEY_DISABLE_EXECUTE) {
                if (!pkey_execute_disable_supported)
                        return -EINVAL;
                new_iamr_bits |= IAMR_EX_BIT;
        }
        init_iamr(pkey, new_iamr_bits);

        /* Set the bits we need in AMR: */
        if (init_val & PKEY_DISABLE_ACCESS)
                new_amr_bits |= AMR_RD_BIT | AMR_WR_BIT;
        else if (init_val & PKEY_DISABLE_WRITE)
                new_amr_bits |= AMR_WR_BIT;

        init_amr(pkey, new_amr_bits);
        return 0;
}
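
/*
 * Example of the mapping above (illustrative): init_val = PKEY_DISABLE_ACCESS
 * sets both AMR_RD_BIT and AMR_WR_BIT for @pkey, PKEY_DISABLE_WRITE sets only
 * AMR_WR_BIT, and PKEY_DISABLE_EXECUTE sets IAMR_EX_BIT (or fails with
 * -EINVAL when execute-disable is not supported).
 */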

void thread_pkey_regs_save(struct thread_struct *thread)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;

        /*
         * TODO: Skip saving registers if @thread hasn't used any keys yet.
         */
        thread->amr = read_amr();
        thread->iamr = read_iamr();
}

void thread_pkey_regs_restore(struct thread_struct *new_thread,
                              struct thread_struct *old_thread)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;

        if (old_thread->amr != new_thread->amr)
                write_amr(new_thread->amr);
        if (old_thread->iamr != new_thread->iamr)
                write_iamr(new_thread->iamr);
}

void thread_pkey_regs_init(struct thread_struct *thread)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;

        thread->amr = default_amr;
        thread->iamr = default_iamr;

        write_amr(default_amr);
        write_iamr(default_iamr);
}

int execute_only_pkey(struct mm_struct *mm)
{
        return mm->context.execute_only_pkey;
}

static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
{
        /* Do this check first since the vm_flags should be hot */
        if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
                return false;

        return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
}

/*
 * This should only be called for *plain* mprotect calls.
 */
int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot,
                                  int pkey)
{
        /*
         * If the currently associated pkey is execute-only, but the requested
         * protection is not execute-only, move it back to the default pkey.
         */
        if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC))
                return 0;

        /*
         * The requested protection is execute-only. Hence let's use an
         * execute-only pkey.
         */
        if (prot == PROT_EXEC) {
                pkey = execute_only_pkey(vma->vm_mm);
                if (pkey > 0)
                        return pkey;
        }

        /* Nothing to override. */
        return vma_pkey(vma);
}
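
/*
 * Illustrative example (call values are hypothetical): for a plain
 * mprotect(addr, len, PROT_EXEC) on a mapping using the default key, the
 * helper above returns the mm's execute-only key when one is available;
 * conversely, mprotect(addr, len, PROT_READ) on an execute-only mapping
 * returns key 0, dropping the execute-only association.
 */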

static bool pkey_access_permitted(int pkey, bool write, bool execute)
{
        int pkey_shift;
        u64 amr;

        pkey_shift = pkeyshift(pkey);
        if (execute)
                return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));

        amr = read_amr();
        if (write)
                return !(amr & (AMR_WR_BIT << pkey_shift));

        return !(amr & (AMR_RD_BIT << pkey_shift));
}

bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return true;

        return pkey_access_permitted(pte_to_pkey_bits(pte), write, execute);
}

/*
 * We only want to enforce protection keys on the current thread because we
 * effectively have no access to AMR/IAMR for other threads or any way to tell
 * which AMR/IAMR in a threaded process we could use.
 *
 * So do not enforce things if the VMA is not from the current mm, or if we are
 * in a kernel thread.
 */
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
                               bool execute, bool foreign)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return true;
        /*
         * Do not enforce our key permissions on a foreign vma.
         */
        if (foreign || vma_is_foreign(vma))
                return true;

        return pkey_access_permitted(vma_pkey(vma), write, execute);
}

void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;

        /* Duplicate the oldmm pkey state in mm: */
        mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
        mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
}