#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
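
/*
 * A minimal sketch of the two step update described above, for a pte
 * of an mm that may be attached to other CPUs. The helper names are
 * illustrative only, not the exact ones used by the s390 mm code:
 *
 *	pte_t entry = mk_pte(page, newprot);	// build the new pte first
 *	ptep_invalidate(mm, address, ptep);	// i) IPTE: invalidate + purge TLB
 *	*ptep = entry;				// ii) store the new pte
 *
 * Only after the invalidation has purged the old entry from all TLBs
 * may the new value be stored.
 */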

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_SMP
#define TLB_NR_PTRS 1
#else
#define TLB_NR_PTRS 508
#endif

struct mmu_gather {
	struct mm_struct *mm;
	unsigned int fullmm;		/* non-zero: full mm teardown in progress */
	unsigned int nr_ptes;		/* pte tables queued at array[0..nr_ptes-1] */
	unsigned int nr_pxds;		/* pmd/pud tables queued at array[nr_pxds..] */
	void *array[TLB_NR_PTRS];	/* filled from both ends */
};
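
/*
 * The array is filled from both ends: pte tables grow upwards from
 * index 0, pmd/pud tables grow downwards from TLB_NR_PTRS. When the
 * two indices meet (nr_ptes >= nr_pxds) the batch is full and
 * tlb_flush_mmu() drains it. A sketch of the layout:
 *
 *	array[]:  pte pte pte -> ...free... <- pud pmd pmd
 *	          0        nr_ptes      nr_pxds     TLB_NR_PTRS
 *
 * The choice of 508 pointers presumably keeps the per-cpu mmu_gather
 * within a single 4KB page on 64-bit (508 * 8 bytes plus the header
 * fields just fit).
 */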

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
						unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;
	tlb->nr_ptes = 0;
	tlb->nr_pxds = TLB_NR_PTRS;
	if (tlb->fullmm)
		__tlb_flush_mm(mm);
	return tlb;
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb,
				 unsigned long start, unsigned long end)
{
	/* Flush the TLB once before freeing any queued page tables. */
	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
		__tlb_flush_mm(tlb->mm);
	while (tlb->nr_ptes > 0)
		page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
	while (tlb->nr_pxds < TLB_NR_PTRS)
		crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb, start, end);

	rcu_table_freelist_finish();

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
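
/*
 * Typical calling sequence, as driven by the generic mm code during an
 * unmap (a sketch only; see mm/memory.c for the real loop, and note
 * that pte/pmd/address here are placeholders):
 *
 *	struct mmu_gather *tlb;
 *
 *	tlb = tlb_gather_mmu(mm, 0);		// 0 = not a full mm teardown
 *	...
 *	pte_free_tlb(tlb, pte, address);	// queue a pte table
 *	pmd_free_tlb(tlb, pmd, address);	// queue a pmd table
 *	...
 *	tlb_finish_mmu(tlb, start, end);	// flush TLB, free tables via RCU
 */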

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache
 * page has already been flushed, so just do free_page_and_swap_cache.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	if (!tlb->fullmm) {
		tlb->array[tlb->nr_ptes++] = pte;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		page_table_free(tlb->mm, (unsigned long *) pte);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table, the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid a double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
	if (!tlb->fullmm) {
		tlb->array[--tlb->nr_pxds] = pmd;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		crst_table_free(tlb->mm, (unsigned long *) pmd);
#endif
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table, the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid a double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
	if (!tlb->fullmm) {
		tlb->array[--tlb->nr_pxds] = pud;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		crst_table_free(tlb->mm, (unsigned long *) pud);
#endif
}

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)

#endif /* _S390_TLB_H */