// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

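/* A single IDA provides both hash context ids and radix PIDs. */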
static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
        return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}

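/*
 * Mark a specific context id as in use so that hash__alloc_context_id()
 * can never hand it out. The id is expected to be currently free.
 */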
void hash__reserve_context_id(int id)
{
        int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

        WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

int hash__alloc_context_id(void)
{
        unsigned long max;

        if (mmu_has_feature(MMU_FTR_68_BIT_VA))
                max = MAX_USER_CONTEXT;
        else
                max = MAX_USER_CONTEXT_65BIT_VA;

        return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

void slb_setup_new_exec(void);

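/*
 * Give @ctx a fresh set of context ids: always a new primary id
 * (extended_id[0], aka ctx->id), plus replacements for any extended
 * ids that were already in use.
 */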
static int realloc_context_ids(mm_context_t *ctx)
{
        int i, id;

        /*
         * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
         * there wasn't one allocated previously (which happens in the exec
         * case where ctx is newly allocated).
         *
         * We have to be a bit careful here. We must keep the existing ids in
         * the array, so that we can test if they're non-zero to decide if we
         * need to allocate a new one. However in case of error we must free the
         * ids we've allocated but *not* any of the existing ones (or risk a
         * UAF). That's why we decrement i at the start of the error handling
         * loop, to skip the id that we just tested but couldn't reallocate.
         */
        for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
                if (i == 0 || ctx->extended_id[i]) {
                        id = hash__alloc_context_id();
                        if (id < 0)
                                goto error;

                        ctx->extended_id[i] = id;
                }
        }

        /* The caller expects us to return id */
        return ctx->id;

error:
        for (i--; i >= 0; i--) {
                if (ctx->extended_id[i])
                        ida_free(&mmu_context_ida, ctx->extended_id[i]);
        }

        return id;
}

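/*
 * Set up the hash MMU part of a new mm: allocate its hash_mm_context
 * (zeroed for a fresh mm, copied from the parent on fork) and then
 * allocate the context id(s).
 */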
static int hash__init_new_context(struct mm_struct *mm)
{
        int index;

        mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
                                           GFP_KERNEL);
        if (!mm->context.hash_context)
                return -ENOMEM;

        /*
         * The old code would re-promote on fork, we don't do that when using
         * slices as it could cause problems promoting slices that have been
         * forced down to 4K.
         *
         * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
         * explicitly against context.id == 0. This ensures that we properly
         * initialize context slice details for newly allocated mm's (which will
         * have id == 0) and don't alter context slice inherited via fork (which
         * will have id != 0).
         *
         * We should not be calling init_new_context() on init_mm. Hence a
         * check against 0 is OK.
         */
        if (mm->context.id == 0) {
                memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
                slice_init_new_context_exec(mm);
        } else {
                /* This is fork. Copy hash_context details from current->mm */
                memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
                /* Inherit subpage prot details if the parent has them. */
                if (current->mm->context.hash_context->spt) {
                        mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
                                                                GFP_KERNEL);
                        if (!mm->context.hash_context->spt) {
                                kfree(mm->context.hash_context);
                                return -ENOMEM;
                        }
                }
#endif
        }

        index = realloc_context_ids(&mm->context);
        if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
                kfree(mm->context.hash_context->spt);
#endif
                kfree(mm->context.hash_context);
                return index;
        }

        pkey_mm_init(mm);
        return index;
}

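/* Refresh slice and SLB state for the newly exec'd image. */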
void hash__setup_new_exec(void)
{
        slice_setup_new_exec();

        slb_setup_new_exec();
}

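/*
 * Allocate a PID for the new mm and install its process table entry
 * (root of the radix page tables plus the tree size) at that index.
 */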
static int radix__init_new_context(struct mm_struct *mm)
{
        unsigned long rts_field;
        int index, max_id;

        max_id = (1 << mmu_pid_bits) - 1;
        index = alloc_context_id(mmu_base_pid, max_id);
        if (index < 0)
                return index;

        /*
         * Set up the process table entry.
         */
        rts_field = radix__get_tree_size();
        process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

        /*
         * Order the above store with subsequent update of the PID
         * register (at which point HW can start loading/caching
         * the entry) and the corresponding load by the MMU from
         * the L2 cache.
         */
        asm volatile("ptesync;isync" : : : "memory");

        mm->context.hash_context = NULL;

        return index;
}

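/*
 * Common entry point for creating a new MMU context: pick the radix or
 * hash path, then initialise the page table fragment caches, IOMMU
 * state and CPU/coprocessor reference counts.
 */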
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int index;

        if (radix_enabled())
                index = radix__init_new_context(mm);
        else
                index = hash__init_new_context(mm);

        if (index < 0)
                return index;

        mm->context.id = index;

        mm->context.pte_frag = NULL;
        mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
        mm_iommu_init(mm);
#endif
        atomic_set(&mm->context.active_cpus, 0);
        atomic_set(&mm->context.copros, 0);

        return 0;
}

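/* Return a single context id to the allocator. */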
void __destroy_context(int context_id)
{
        ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);

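/*
 * Free every context id recorded in @ctx (the primary id lives in
 * extended_id[0]) along with the hash_mm_context, if any.
 */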
static void destroy_contexts(mm_context_t *ctx)
{
        int index, context_id;

        for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
                context_id = ctx->extended_id[index];
                if (context_id)
                        ida_free(&mmu_context_ida, context_id);
        }
        kfree(ctx->hash_context);
}

static void pmd_frag_destroy(void *pmd_frag)
{
        int count;
        struct page *page;

        page = virt_to_page(pmd_frag);
        /* drop all the pending references */
        count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
        /* We allow PMD_FRAG_NR fragments from a PMD page */
        if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
                pgtable_pmd_page_dtor(page);
                __free_page(page);
        }
}

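/*
 * Drop the PTE/PMD fragment pages this mm still has cached, releasing
 * the references it holds on them.
 */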
static void destroy_pagetable_cache(struct mm_struct *mm)
{
        void *frag;

        frag = mm->context.pte_frag;
        if (frag)
                pte_frag_destroy(frag);

        frag = mm->context.pmd_frag;
        if (frag)
                pmd_frag_destroy(frag);
}

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
        WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
        /*
         * For tasks which were successfully initialized we end up calling
         * arch_exit_mmap() which clears the process table entry. And
         * arch_exit_mmap() is called before the required fullmm TLB flush
         * which does a RIC=2 flush. Hence for an initialized task, we do clear
         * any cached process table entries.
         *
         * The condition below handles the error case during task init. We have
         * set the process table entry early and if we fail a task
         * initialization, we need to ensure the process table entry is zeroed.
         * We need not worry about process table entry caches because the task
         * never ran with the PID value.
         */
        if (radix_enabled())
                process_tb[mm->context.id].prtb0 = 0;
        else
                subpage_prot_free(mm);
        destroy_contexts(&mm->context);
        mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
        destroy_pagetable_cache(mm);

        if (radix_enabled()) {
                /*
                 * Radix doesn't have a valid bit in the process table
                 * entries. However we know that at least the P9 implementation
                 * will avoid caching an entry with an invalid RTS field,
                 * and 0 is invalid. So this will do.
                 *
                 * This runs before the "fullmm" tlb flush in exit_mmap,
                 * which does a RIC=2 tlbie to clear the process table
                 * entry. See the "fullmm" comments in tlb-radix.c.
                 *
                 * No barrier required here after the store because
                 * this process will do the invalidate, which starts with
                 * ptesync.
                 */
                process_tb[mm->context.id].prtb0 = 0;
        }
}

#ifdef CONFIG_PPC_RADIX_MMU
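/*
 * Point the MMU at the next mm by writing its PID into SPRN_PID; the
 * isync ensures subsequent instructions execute in the new context.
 */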
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
        mtspr(SPRN_PID, next->context.id);
        isync();
}
#endif