#ifndef _S390_TLB_H
#define _S390_TLB_H

4/*
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02005 * TLB flushing on s390 is complicated. The following requirement
6 * from the principles of operation is the most arduous:
7 *
8 * "A valid table entry must not be changed while it is attached
9 * to any CPU and may be used for translation by that CPU except to
10 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
11 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
12 * table entry, or (3) make a change by means of a COMPARE AND SWAP
13 * AND PURGE instruction that purges the TLB."
14 *
15 * The modification of a pte of an active mm struct therefore is
16 * a two step process: i) invalidate the pte, ii) store the new pte.
17 * This is true for the page protection bit as well.
18 * The only possible optimization is to flush at the beginning of
19 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
20 *
21 * Pages used for the page tables is a different story. FIXME: more
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 */
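
/*
 * A minimal sketch (not part of this header) of the two-step update
 * required for a pte of an active mm: invalidate first, store second.
 * invalidate_pte() is a hypothetical stand-in for the real primitive,
 * an IPTE or CSP-based instruction sequence.
 */
#if 0
static inline void set_pte_two_step(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep, pte_t entry)
{
        /* Step 1: invalidate the old pte and purge it from all TLBs. */
        invalidate_pte(mm, addr, ptep);         /* hypothetical helper */
        /* Step 2: only now may the new value be stored. */
        *ptep = entry;
}
#endif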

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_SMP
#define TLB_NR_PTRS     1
#else
#define TLB_NR_PTRS     508
#endif

struct mmu_gather {
        struct mm_struct *mm;
        unsigned int fullmm;
        unsigned int nr_ptes;
        unsigned int nr_pxds;
        void *array[TLB_NR_PTRS];
};
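
/*
 * array[] is used as a double-ended stack: pointers to freed pte
 * tables are pushed from index 0 upwards (nr_ptes), pointers to freed
 * pmd/pud tables from TLB_NR_PTRS downwards (nr_pxds). When the two
 * indices meet, the batch is flushed by tlb_flush_mmu.
 */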

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

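/*
 * full_mm_flush is non-zero when the caller is about to tear down the
 * entire address space (for example on process exit). In that case the
 * mm is flushed once up front and the individual table frees in this
 * cycle need no further flushing.
 */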
static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
                                                unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;
        tlb->nr_ptes = 0;
        tlb->nr_pxds = TLB_NR_PTRS;
        if (tlb->fullmm)
                __tlb_flush_mm(mm);
        return tlb;
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb,
                                 unsigned long start, unsigned long end)
{
        if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
                __tlb_flush_mm(tlb->mm);
        while (tlb->nr_ptes > 0)
                page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
        while (tlb->nr_pxds < TLB_NR_PTRS)
                crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
                                  unsigned long start, unsigned long end)
{
        tlb_flush_mmu(tlb, start, end);

        rcu_table_freelist_finish();

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}
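
/*
 * A sketch of how a caller drives a gather cycle; the unmap path shown
 * here is illustrative, not a real kernel code path.
 */
#if 0
static void example_unmap(struct mm_struct *mm,
                          unsigned long start, unsigned long end)
{
        struct mmu_gather *tlb;

        tlb = tlb_gather_mmu(mm, 0);    /* 0: not a full mm teardown */
        /* ... clear ptes, then hand pages and page tables to the batch: */
        /* tlb_remove_page(tlb, page); */
        /* pte_free_tlb(tlb, pte, address); */
        tlb_finish_mmu(tlb, start, end);        /* flush tlb, free tables */
}
#endif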

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just do free_page_and_swap_cache.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        free_page_and_swap_cache(page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
                                unsigned long address)
{
        if (!tlb->fullmm) {
                tlb->array[tlb->nr_ptes++] = pte;
                if (tlb->nr_ptes >= tlb->nr_pxds)
                        tlb_flush_mmu(tlb, 0, 0);
        } else
                page_table_free(tlb->mm, (unsigned long *) pte);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                unsigned long address)
{
#ifdef __s390x__
        if (tlb->mm->context.asce_limit <= (1UL << 31))
                return;
        if (!tlb->fullmm) {
                tlb->array[--tlb->nr_pxds] = pmd;
                if (tlb->nr_ptes >= tlb->nr_pxds)
                        tlb_flush_mmu(tlb, 0, 0);
        } else
                crst_table_free(tlb->mm, (unsigned long *) pmd);
#endif
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long address)
{
#ifdef __s390x__
        if (tlb->mm->context.asce_limit <= (1UL << 42))
                return;
        if (!tlb->fullmm) {
                tlb->array[--tlb->nr_pxds] = pud;
                if (tlb->nr_ptes >= tlb->nr_pxds)
                        tlb_flush_mmu(tlb, 0, 0);
        } else
                crst_table_free(tlb->mm, (unsigned long *) pud);
#endif
}
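
/*
 * Summary of the asce_limit cut-offs used above:
 *
 *      asce_limit <= 2GB (1UL << 31): two level page table, the single
 *          pmd doubles as the pgd, so pmd_free_tlb must not free it;
 *      asce_limit <= 4TB (1UL << 42): three level page table, the single
 *          pud doubles as the pgd, so pud_free_tlb must not free it.
 */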

#define tlb_start_vma(tlb, vma)                 do { } while (0)
#define tlb_end_vma(tlb, vma)                   do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)   do { } while (0)
#define tlb_migrate_finish(mm)                  do { } while (0)

#endif /* _S390_TLB_H */