#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
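
/*
 * Illustrative sketch only (not taken from any in-tree caller): one
 * plausible lifecycle for the preregistration API above, roughly as a
 * TCE IOMMU driver might use it.  Error handling, locking and the
 * surrounding context are omitted, and map_tce() plus the local
 * variable names are made up for the example.
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (mm_iommu_get(mm, ua, entries, &mem))
 *		return -EFAULT;
 *
 *	if (mm_iommu_mapped_inc(mem))
 *		goto put;
 *	if (!mm_iommu_ua_to_hpa(mem, ua, &hpa))
 *		map_tce(hpa);
 *	mm_iommu_mapped_dec(mem);
 *
 *put:
 *	mm_iommu_put(mm, mem);
 */
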
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched/core.c
 */
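/*
 * switch_mm_irqs_off() is the interrupts-already-disabled variant;
 * switch_mm() below simply wraps it with local_irq_save()/restore().
 */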
static inline void switch_mm_irqs_off(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark this context as having been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		new_on_cpu = true;
	}

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of the current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/* We must stop all altivec streams before changing the HW
	 * context.
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now.
	 */
	switch_mmu_context(prev, next, tsk);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
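/*
 * Advertise that this architecture provides its own switch_mm_irqs_off();
 * the generic scheduler code only falls back to plain switch_mm() when
 * this macro is not defined.
 */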
#define switch_mm_irqs_off switch_mm_irqs_off


#define deactivate_mm(tsk,mm)			do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of the current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

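/*
 * If the VDSO gets unmapped (e.g. by CRIU or a misbehaving program),
 * forget its base address so the kernel does not later build signal
 * trampolines that point at unmapped memory.
 */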
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */