/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
				 __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, so execute permission is treated the same as read.  Also,
 * write permission implies read permission.  This is the closest we
 * can get by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c; the real values are
 * generated at runtime (see the note below these tables).
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
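
/*
 * Note: the real protection_map[] entries are filled in at boot, once
 * the default cacheability (_page_cachable_default below) and the
 * presence of the RI/XI bits are known; see setup_protection_map(),
 * currently in arch/mips/mm/cache.c.
 */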

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
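/*
 * When the data cache can alias, the kernel sets up several consecutive
 * zero pages, one per cache colour, and the "& zero_page_mask" above
 * picks the one whose colour matches vaddr; with no aliasing there is a
 * single zero page and the mask is zero.
 */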

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

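/*
 * htw_stop() and htw_start() disable and re-enable the hardware page
 * table walker (the PWEn bit in the CP0 PWCtl register) around software
 * updates of the page tables.  htw_seq is a per-CPU nesting count, so
 * only the outermost stop/start pair actually toggles the walker.
 */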
#define htw_stop()						\
do {								\
	unsigned long flags;					\
								\
	if (cpu_has_htw) {					\
		local_irq_save(flags);				\
		if (!raw_current_cpu_data.htw_seq++) {		\
			write_c0_pwctl(read_c0_pwctl() &	\
				~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();		\
		}						\
		local_irq_restore(flags);			\
	}							\
} while (0)

#define htw_start()						\
do {								\
	unsigned long flags;					\
								\
	if (cpu_has_htw) {					\
		local_irq_save(flags);				\
		if (!--raw_current_cpu_data.htw_seq) {		\
			write_c0_pwctl(read_c0_pwctl() |	\
				(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();		\
		}						\
		local_irq_restore(flags);			\
	}							\
} while (0)

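/*
 * A MIPS TLB entry maps a pair of consecutive virtual pages (EntryLo0
 * and EntryLo1), and the hardware only treats the entry as global when
 * both halves have the G bit set.  ptep_buddy() names the other PTE of
 * such a pair, which is why set_pte() and pte_clear() below go out of
 * their way to keep _PAGE_GLOBAL consistent across the pair.
 */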
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

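/*
 * With 64-bit physical addresses on a 32-bit CPU each PTE is a pair of
 * 32-bit words.  set_pte() below writes pte_high first and separates the
 * two stores with smp_wmb(), so by the time _PAGE_PRESENT in pte_low
 * becomes visible to another CPU the matching pte_high is already valid.
 */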
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_high & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			buddy->pte_high |= _PAGE_GLOBAL;
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
		null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		if (kernel_uses_llsc && R10000_LLSC_WAR) {
			__asm__ __volatile__ (
			"	.set	arch=r4000			\n"
			"	.set	push				\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqzl	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			"	.set	mips0				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		} else if (kernel_uses_llsc) {
			__asm__ __volatile__ (
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	.set	push				\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqz	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			"	.set	mips0				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		}
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with its size, but gcc 3.3 and older are
 * not able to see that this expression is a constant, so the size is
 * dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ)
		pte.pte_high |= _PAGE_SILENT_READ;
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	else
#endif
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
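
/*
 * A typical use (a sketch, not something this header mandates) is a
 * driver making its register mapping uncached before handing it to
 * userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn, size,
 *				  vma->vm_page_prot);
 */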

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif


extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

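/*
 * update_mmu_cache() is called by the generic MM code after a PTE has
 * been installed, giving the architecture a chance to preload the TLB
 * and to bring the caches into line with the new mapping.
 */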
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

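/*
 * With 64-bit physical addresses on a 32-bit kernel, an I/O region handed
 * to us as a 32-bit pfn may really live above the 32-bit physical range.
 * fixup_bigphys_addr() is expected to be supplied by the platform code and
 * to translate such an address into the full physical address before the
 * range is remapped.
 */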
#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;
	else
#endif
	if (pmd_val(pmd) & _PAGE_READ)
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
	unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */