/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context() to only handle live-mm.
 *   retiring-mm is handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <asm/arcregs.h>
#include <asm/tlb.h>

#include <asm-generic/mm_hooks.h>

/* ARC700 ASID Management
 *
 * The ARC MMU provides an 8-bit ASID (0..255) to tag TLB entries, allowing
 * entries with the same vaddr (from different tasks) to co-exist. This
 * provides for a "Fast Context Switch", i.e. no TLB flush on context switch.
 *
 * Linux assigns each task a unique ASID. A simple round-robin allocation
 * of H/w ASID is done using the software tracker @asid_cpu.
 * When it reaches the max of 255, the allocation cycle starts afresh by
 * flushing the entire TLB and wrapping the ASID back to zero.
 *
 * A new allocation cycle, post rollover, could potentially reassign an ASID
 * to a different task. Thus the rule is to refresh the ASID in a new cycle.
 * The 32 bit @asid_cpu (and mm->asid) carry the 8 bit MMU PID in the lower
 * bits, while the remaining 24 bits serve as a cycle/generation indicator;
 * natural 32 bit unsigned math automagically increments the generation when
 * the lower 8 bits roll over.
 */

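/*
 * For illustration: if asid_cpu(cpu) is 0x1ff (hw ASID 255, generation 1),
 * the next allocation increments it to 0x200 - the lower 8 bits wrap to 0
 * (forcing a full TLB flush) while the upper 24 bits advance the generation
 * to 2, which marks every mm ASID tagged with generation 1 as stale.
 */
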
#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg :8 bit PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)

#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL

#define asid_mm(mm, cpu)	mm->context.asid[cpu]
#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)

DECLARE_PER_CPU(unsigned int, asid_cache);
#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)

/*
 * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
 * Also set the MMU PID register to existing/updated ASID
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Move to a new ASID if it was not from the current
	 * alloc-cycle/generation. This is done by ensuring that the
	 * generation bits in both mm->ASID and the cpu's ASID counter are
	 * exactly the same.
	 *
	 * Note: Callers needing a new ASID unconditionally, independent of
	 *       generation, e.g. local_flush_tlb_mm() for a forking parent,
	 *       first need to destroy the context, setting it to an invalid
	 *       value.
	 */
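	/*
	 * For illustration (arbitrary values): asid_mm = 0x1a3 (generation 1)
	 * and asid_cpu = 0x207 (generation 2): XOR gives 0x3a4, and masking
	 * with MM_CTXT_CYCLE_MASK leaves 0x300, i.e. non-zero, so the mm's
	 * ASID is stale and a new one is allocated below.
	 */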
	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
		goto set_hw;

	/* move to new ASID and handle rollover */
	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {

		local_flush_tlb_all();

		/*
		 * The check above is for rollover of the 8 bit ASID in the
		 * 32 bit container. If the container itself wrapped around,
		 * set it to a non zero "generation" to distinguish it from
		 * no context.
		 */
		if (!asid_cpu(cpu))
			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
	}

	/* Assign new ASID to tsk */
	asid_mm(mm, cpu) = asid_cpu(cpu);

set_hw:
	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);

	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		asid_mm(mm, i) = MM_CTXT_NO_ASID;

	return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	/* Needed to elide CONFIG_DEBUG_PREEMPT warning */
	local_irq_save(flags);
	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
	local_irq_restore(flags);
}

/*
 * Prepare the MMU for the task: set up the PID register with the allocated
 * ASID. If the task doesn't have an ASID (never allocated, or stolen), get
 * a new one.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	const int cpu = smp_processor_id();

	/*
	 * Note that the mm_cpumask is "aggregating" only: we don't clear it
	 * for the switched-out task, unlike some other arches.
	 * It is used to enlist cpus for sending TLB flush IPIs; not sending
	 * the IPI to CPUs where a task once ran could cause stale TLB entry
	 * re-use, especially for a multi-threaded task.
	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
	 *      For a non-aggregating mm_cpumask, the IPI is not sent to C1,
	 *      and if T1 were to re-migrate to C1, it could access the
	 *      unmapped region via any existing stale TLB entries.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));

#ifndef CONFIG_SMP
	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

	get_new_mmu_context(next);
}

/*
 * Called at the time of execve() to get a new ASID.
 * Note the subtlety here: get_new_mmu_context() behaves differently here
 * vs. in switch_mm(). Here it always returns a new ASID, because mm has
 * an unallocated "initial" value, while in the latter it moves to a new
 * ASID only if it was unallocated.
 */
#define activate_mm(prev, next)		switch_mm(prev, next, NULL)

/*
 * It seemed that deactivate_mm() is a reasonable place to do the book-keeping
 * for a retiring mm. However destroy_context() still needs to do that because
 * between mm_release() => deactivate_mm() and
 * mmput() => .. => __mmdrop() => destroy_context()
 * there is a good chance that the task gets sched-out/in, making its ASID
 * valid again (this teased me for a whole day).
 */
#define deactivate_mm(tsk, mm)		do { } while (0)

#define enter_lazy_tlb(mm, tsk)

#endif	/* _ASM_ARC_MMU_CONTEXT_H */