// SPDX-License-Identifier: GPL-2.0+
/*
 * PowerPC Memory Protection Keys management
 *
 * Copyright 2017, Ram Pai, IBM Corporation.
 */

#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/setup.h>
#include <linux/pkeys.h>
#include <linux/of_fdt.h>

int num_pkey;		/* Max number of pkeys supported */
/*
 * Keys marked in the reservation list cannot be allocated by userspace.
 */
u32 reserved_allocation_mask __ro_after_init;

/* Bits set for the initially allocated keys */
static u32 initial_allocation_mask __ro_after_init;

/*
 * Even if we allocate keys with sys_pkey_alloc(), we need to make sure
 * other threads still find access denied for those same keys.
 */
static u64 default_amr = ~0x0UL;
static u64 default_iamr = 0x5555555555555555UL;
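/*
 * Note added for clarity (not in the original source): 0x5555... has the
 * low bit of every 2-bit pkey field set, i.e. IAMR_EX_BIT for every key,
 * so instruction fetch is denied by default for all keys until a key's
 * IAMR bit is explicitly cleared (as done below for key 0 and the
 * execute-only key).
 */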
u64 default_uamor __ro_after_init;
/*
 * Key used to implement PROT_EXEC mmap. Denies READ/WRITE.
 * We pick key 2 because key 0 is special and key 1 is reserved as per the ISA.
 */
static int execute_only_key = 2;
static bool pkey_execute_disable_supported;

#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
#define AMR_WR_BIT 0x2UL
#define IAMR_EX_BIT 0x1UL
#define PKEY_REG_BITS (sizeof(u64) * 8)
#define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey+1) * AMR_BITS_PER_PKEY))
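/*
 * Worked example, added for illustration: with PKEY_REG_BITS == 64 and
 * AMR_BITS_PER_PKEY == 2, pkeyshift(0) == 62 and pkeyshift(2) == 58, so
 * key 2's read/write deny bits sit at AMR bits 58-59 (LSB-0 numbering)
 * and its execute deny bit at IAMR bit 58.
 */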

static int __init dt_scan_storage_keys(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be32 *prop;
        int *pkeys_total = (int *) data;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,processor-storage-keys", NULL);
        if (!prop)
                return 0;
        *pkeys_total = be32_to_cpu(prop[0]);
        return 1;
}
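/*
 * Illustrative (assumed) device tree snippet matched by the scan above;
 * the exact cell layout may differ by firmware version, and only prop[0]
 * is consumed here:
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		ibm,processor-storage-keys = <32>;
 *	};
 */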

static int scan_pkey_feature(void)
{
        int ret;
        int pkeys_total = 0;

        /*
         * Pkeys are not supported with Radix translation.
         */
        if (early_radix_enabled())
                return 0;

        ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total);
        if (ret == 0) {
                /*
                 * Assume 32 pkeys on P8/P9 bare metal if the device tree
                 * does not define it. We make this exception because some
                 * versions of skiboot forgot to expose this property on
                 * POWER8/9.
                 */
                if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                        unsigned long pvr = mfspr(SPRN_PVR);

                        if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
                            PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
                                pkeys_total = 32;
                }
        }

        /*
         * Adjust the upper limit, based on the number of bits supported by
         * arch-neutral code.
         */
        pkeys_total = min_t(int, pkeys_total,
                            ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1));
        return pkeys_total;
}

void __init pkey_early_init_devtree(void)
{
        int pkeys_total, i;

        /*
         * We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral
         * generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE.
         * Ensure that the bits are distinct.
         */
        BUILD_BUG_ON(PKEY_DISABLE_EXECUTE &
                     (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));

        /*
         * pkey_to_vmflag_bits() assumes that the pkey bits are contiguous
         * in the vma flags. Make sure that is really the case.
         */
        BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) +
                     __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)
                     != (sizeof(u64) * BITS_PER_BYTE));

        /*
         * Only POWER7 and above support SPRN_AMR updates with MSR[PR] = 1.
         */
        if (!early_cpu_has_feature(CPU_FTR_ARCH_206))
                return;

        /* scan the device tree for pkey feature */
        pkeys_total = scan_pkey_feature();
        if (!pkeys_total)
                goto out;

        /* Allow all keys to be modified by default */
        default_uamor = ~0x0UL;

        cur_cpu_spec->mmu_features |= MMU_FTR_PKEY;

        /*
         * The device tree cannot be relied on to indicate execute_disable
         * support. Instead we use a PVR check.
         */
        if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p))
                pkey_execute_disable_supported = false;
        else
                pkey_execute_disable_supported = true;

#ifdef CONFIG_PPC_4K_PAGES
        /*
         * The OS can manage only 8 pkeys due to its inability to represent them
         * in the Linux 4K PTE. Mark all other keys reserved.
         */
        num_pkey = min(8, pkeys_total);
#else
        num_pkey = pkeys_total;
#endif

        if (unlikely(num_pkey <= execute_only_key) || !pkey_execute_disable_supported) {
                /*
                 * Insufficient number of keys to support
                 * execute only key. Mark it unavailable.
                 */
                execute_only_key = -1;
        } else {
                /*
                 * Mark the execute_only_pkey as not available for
                 * user allocation via pkey_alloc.
                 */
                reserved_allocation_mask |= (0x1 << execute_only_key);

                /*
                 * Deny READ/WRITE for execute_only_key.
                 * Allow execute in IAMR.
                 */
                default_amr |= (0x3ul << pkeyshift(execute_only_key));
                default_iamr &= ~(0x1ul << pkeyshift(execute_only_key));

                /*
                 * Clear the uamor bits for this key.
                 */
                default_uamor &= ~(0x3ul << pkeyshift(execute_only_key));
        }

        /*
         * Allow access only for key 0, and prevent userspace from modifying
         * its permissions.
         */
        default_amr &= ~(0x3ul << pkeyshift(0));
        default_iamr &= ~(0x1ul << pkeyshift(0));
        default_uamor &= ~(0x3ul << pkeyshift(0));
        /*
         * Key 0 is special: we consider it always allocated (preallocated).
         * We don't allow changing its AMR bits, but one can still
         * pkey_free() it.
         */
        initial_allocation_mask |= (0x1 << 0);

        /*
         * Key 1 is recommended not to be used; see the programming note on
         * page 1015 of the Power ISA (3.0).
         */
        reserved_allocation_mask |= (0x1 << 1);
        default_uamor &= ~(0x3ul << pkeyshift(1));

        /*
         * Prevent the usage of OS reserved keys. Update UAMOR
         * for those keys. Also mark the rest of the bits in the
         * 32-bit mask as reserved.
         */
        for (i = num_pkey; i < 32; i++) {
                reserved_allocation_mask |= (0x1 << i);
                default_uamor &= ~(0x3ul << pkeyshift(i));
        }
        /*
         * Prevent the allocation of reserved keys too.
         */
        initial_allocation_mask |= reserved_allocation_mask;

        pr_info("Enabling pkeys with max key count %d\n", num_pkey);
out:
        /*
         * Set up UAMOR on the boot CPU.
         */
        mtspr(SPRN_UAMOR, default_uamor);

        return;
}

void pkey_mm_init(struct mm_struct *mm)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;
        mm_pkey_allocation_map(mm) = initial_allocation_mask;
        mm->context.execute_only_pkey = execute_only_key;
}

static inline u64 read_amr(void)
{
        return mfspr(SPRN_AMR);
}

static inline void write_amr(u64 value)
{
        mtspr(SPRN_AMR, value);
}

static inline u64 read_iamr(void)
{
        if (!likely(pkey_execute_disable_supported))
                return 0x0UL;

        return mfspr(SPRN_IAMR);
}

static inline void write_iamr(u64 value)
{
        if (!likely(pkey_execute_disable_supported))
                return;

        mtspr(SPRN_IAMR, value);
}

static inline void init_amr(int pkey, u8 init_bits)
{
        u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
        u64 old_amr = read_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));

        write_amr(old_amr | new_amr_bits);
}

static inline void init_iamr(int pkey, u8 init_bits)
{
        u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey));
        u64 old_iamr = read_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));

        write_iamr(old_iamr | new_iamr_bits);
}

/*
 * Set the access rights in the AMR and IAMR registers for @pkey to those
 * specified in @init_val, provided UAMOR allows the key to be modified.
 */
int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
                                unsigned long init_val)
{
        u64 new_amr_bits = 0x0ul;
        u64 new_iamr_bits = 0x0ul;
        u64 pkey_bits, uamor_pkey_bits;

        /*
         * Check whether the key is disabled by UAMOR.
         */
        pkey_bits = 0x3ul << pkeyshift(pkey);
        uamor_pkey_bits = (default_uamor & pkey_bits);

        /*
         * Both the bits in UAMOR corresponding to the key should be set.
         */
        if (uamor_pkey_bits != pkey_bits)
                return -EINVAL;

        if (init_val & PKEY_DISABLE_EXECUTE) {
                if (!pkey_execute_disable_supported)
                        return -EINVAL;
                new_iamr_bits |= IAMR_EX_BIT;
        }
        init_iamr(pkey, new_iamr_bits);

        /* Set the bits we need in AMR: */
        if (init_val & PKEY_DISABLE_ACCESS)
                new_amr_bits |= AMR_RD_BIT | AMR_WR_BIT;
        else if (init_val & PKEY_DISABLE_WRITE)
                new_amr_bits |= AMR_WR_BIT;

        init_amr(pkey, new_amr_bits);
        return 0;
}
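
/*
 * Illustrative userspace usage (a sketch, not part of the original file;
 * addr/len are assumed): the generic pkey syscalls end up programming the
 * AMR/IAMR bits through the helper above, roughly:
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
 *
 * pkey_alloc() with an access-rights argument reaches
 * arch_set_user_pkey_access(), which calls __arch_set_user_pkey_access()
 * to set AMR_WR_BIT for the newly allocated key.
 */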

void thread_pkey_regs_save(struct thread_struct *thread)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;

        /*
         * TODO: Skip saving registers if @thread hasn't used any keys yet.
         */
        thread->amr = read_amr();
        thread->iamr = read_iamr();
}

void thread_pkey_regs_restore(struct thread_struct *new_thread,
                              struct thread_struct *old_thread)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;

        if (old_thread->amr != new_thread->amr)
                write_amr(new_thread->amr);
        if (old_thread->iamr != new_thread->iamr)
                write_iamr(new_thread->iamr);
}

void thread_pkey_regs_init(struct thread_struct *thread)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;

        thread->amr = default_amr;
        thread->iamr = default_iamr;

        write_amr(default_amr);
        write_iamr(default_iamr);
}

int execute_only_pkey(struct mm_struct *mm)
{
        return mm->context.execute_only_pkey;
}

static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
{
        /* Do this check first since the vm_flags should be hot */
        if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
                return false;

        return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
}

/*
 * This should only be called for *plain* mprotect calls.
 */
int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot,
                                  int pkey)
{
        /*
         * If the currently associated pkey is execute-only, but the requested
         * protection is not execute-only, move it back to the default pkey.
         */
        if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC))
                return 0;

        /*
         * The requested protection is execute-only. Hence let's use an
         * execute-only pkey.
         */
        if (prot == PROT_EXEC) {
                pkey = execute_only_pkey(vma->vm_mm);
                if (pkey > 0)
                        return pkey;
        }

        /* Nothing to override. */
        return vma_pkey(vma);
}
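
/*
 * Example of the effect of the override above (illustrative; addr/len are
 * assumed): on a VMA carrying the default key,
 *
 *	mprotect(addr, len, PROT_EXEC);
 *
 * switches the VMA to execute_only_key (when one is available), so reads
 * and writes fault while instruction fetches are allowed; a later
 * mprotect(addr, len, PROT_READ) moves it back to the default key.
 */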

static bool pkey_access_permitted(int pkey, bool write, bool execute)
{
        int pkey_shift;
        u64 amr;

        pkey_shift = pkeyshift(pkey);
        if (execute)
                return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));

        amr = read_amr();
        if (write)
                return !(amr & (AMR_WR_BIT << pkey_shift));

        return !(amr & (AMR_RD_BIT << pkey_shift));
}

bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return true;

        return pkey_access_permitted(pte_to_pkey_bits(pte), write, execute);
}

/*
 * We only want to enforce protection keys on the current thread because we
 * effectively have no access to AMR/IAMR for other threads or any way to tell
 * which AMR/IAMR in a threaded process we could use.
 *
 * So do not enforce things if the VMA is not from the current mm, or if we are
 * in a kernel thread.
 */
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
                               bool execute, bool foreign)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return true;
        /*
         * Do not enforce our key-permissions on a foreign vma.
         */
        if (foreign || vma_is_foreign(vma))
                return true;

        return pkey_access_permitted(vma_pkey(vma), write, execute);
}

void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
{
        if (!mmu_has_feature(MMU_FTR_PKEY))
                return;

        /* Duplicate the oldmm pkey state in mm: */
        mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
        mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
}