#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern bool mm_iommu_preregistered(void);
extern long mm_iommu_get(unsigned long ua, unsigned long entries,
                struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(mm_context_t *ctx);
extern void mm_iommu_cleanup(mm_context_t *ctx);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
                unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
                unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
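/*
 * Illustrative sketch of a typical consumer of the preregistration API
 * above (hypothetical helper, not part of this header; the usual
 * 0/-errno return convention is assumed): preregister a region of
 * userspace memory, translate a userspace address to a host physical
 * address, and bump the mapped count while a hardware mapping exists.
 *
 *      static long example_iommu_pin(unsigned long ua, unsigned long entries,
 *                                    unsigned long *hpa)
 *      {
 *              struct mm_iommu_table_group_mem_t *mem;
 *              long ret;
 *
 *              ret = mm_iommu_get(ua, entries, &mem);
 *              if (ret)
 *                      return ret;
 *              ret = mm_iommu_ua_to_hpa(mem, ua, hpa);
 *              if (!ret)
 *                      ret = mm_iommu_mapped_inc(mem);
 *              if (ret)
 *                      mm_iommu_put(mem);
 *              return ret;
 *      }
 *
 * The matching teardown would call mm_iommu_mapped_dec() once the
 * hardware mapping is gone, then mm_iommu_put() to drop the
 * preregistration.
 */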
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
                                      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
                                      struct mm_struct *next,
                                      struct task_struct *tsk)
{
        if (radix_enabled())
                return radix__switch_mmu_context(prev, next);
        return switch_slb(tsk, next);
}

extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        /* Mark this context as having been used on the new CPU */
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

        /* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
        tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

        /* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
        get_paca()->pgd = next->pgd;
#endif
        /* Nothing else to do if we aren't actually switching */
        if (prev == next)
                return;

#ifdef CONFIG_PPC_ICSWX
        /* Switch coprocessor context only if prev or next uses a coprocessor */
        if (prev->context.acop || next->context.acop)
                switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

        /* We must stop all altivec streams before changing the HW
         * context
         */
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */
        /*
         * The actual HW switching method differs between the various
         * sub-architectures. Out of line for now.
         */
        switch_mmu_context(prev, next, tsk);
}
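/*
 * For orientation, a greatly simplified paraphrase of the caller in
 * kernel/sched/core.c (sketch only, not the actual scheduler code:
 * reference counting, locking and lazy-TLB bookkeeping are omitted):
 *
 *      struct mm_struct *oldmm = prev->active_mm;
 *
 *      if (next->mm)
 *              switch_mm(oldmm, next->mm, next);
 *      else
 *              enter_lazy_tlb(oldmm, next);
 *
 * i.e. a user task gets a real MMU switch, while a kernel thread just
 * borrows the previous mm and stays lazy.
 */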

#define deactivate_mm(tsk,mm)                   do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm(prev, next, current);
        local_irq_restore(flags);
}
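/*
 * Paraphrased from exec_mmap() in fs/exec.c (sketch only, locking and
 * error handling omitted): at exec time the new mm is installed first,
 * then activated so this CPU starts using the new mappings.
 *
 *      old_mm = current->active_mm;
 *      current->mm = mm;
 *      current->active_mm = mm;
 *      activate_mm(old_mm, mm);
 */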

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
                                  struct task_struct *tsk)
{
        /* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
        get_paca()->pgd = NULL;
#endif
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
                              struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
                mm->context.vdso_base = 0;
}
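/*
 * Worked example (hypothetical addresses): with the vDSO based at
 * 0x100000, an munmap() of [0xf0000, 0x110000) satisfies
 * start <= vdso_base < end, so vdso_base is reset to 0 and the kernel
 * stops pointing signal trampolines at the stale mapping; an munmap() of
 * [0x110000, 0x120000) leaves it untouched.  Note that only the base
 * address is checked, not the whole vDSO range.
 */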

static inline void arch_bprm_mm_init(struct mm_struct *mm,
                                     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* by default, allow everything */
        return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
        /* by default, allow everything */
        return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */