/* $Id: mmu_context.h,v 1.54 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>

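/* Entering lazy TLB mode needs no per-cpu setup here; the cpu_vm_mask
 * handling in switch_mm() below (see DaveM's note there) copes with
 * lazy TLB switches.
 */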
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

extern void get_new_mmu_context(struct mm_struct *mm);
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

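/* tsb_context_switch() hands the new address space's page table base
 * (passed as a physical address) and its TSB to the low-level TLB-miss
 * handling code; switch_mm() and activate_mm() below call it whenever
 * they install a new secondary context.
 */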
extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);

/* Set MMU context in the actual hardware. */
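/* The stxa below writes the hardware context number (CTX_HWBITS()) of
 * @__mm into the SECONDARY_CONTEXT MMU register through the D-MMU ASI;
 * the trailing "flush %g6" just needs a valid address and is there to
 * make the register write take effect before further memory accesses.
 */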
#define load_secondary_context(__mm) \
	__asm__ __volatile__("stxa %0, [%1] %2\n\t" \
			     "flush %%g6" \
			     : /* No outputs */ \
			     : "r" (CTX_HWBITS((__mm)->context)), \
			       "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU))

extern void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid;
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);
	spin_unlock(&mm->page_table_lock);

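	/* Only reload the hardware context and TSB state if we just
	 * allocated a fresh context above or are actually switching to
	 * a different address space.
	 */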
	if (!ctx_valid || (old_mm != mm)) {
		load_secondary_context(mm);
		tsb_context_switch(__pa(mm->pgd),
				   mm->context.sparc64_tsb);
	}

	/* Even if (mm == old_mm) we _must_ check
	 * the cpu_vm_mask.  If we do not we could
	 * corrupt the TLB state because of how
	 * smp_flush_tlb_{page,range,mm} on sparc64
	 * and lazy tlb switches work. -DaveM
	 */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
}

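/* Nothing needs to be done when deactivating an mm on sparc64; the
 * context number itself is only released in destroy_context().
 */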
#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);
	spin_unlock(&mm->page_table_lock);

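	/* Unlike switch_mm(), unconditionally install the new context,
	 * flush any stale TLB entries it may have on this cpu, and hand
	 * its page table and TSB over to the TLB-miss handling code.
	 */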
	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb);
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */