/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <platform/hardware.h>
#include <asm/kmem_layout.h>

/*
 * PAGE_SHIFT determines the page size
 */

#define PAGE_SHIFT	12
#define PAGE_SIZE	(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

#ifdef CONFIG_MMU
#define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
#define PHYS_OFFSET	XCHAL_KSEG_PADDR
#define MAX_LOW_PFN	(PHYS_PFN(XCHAL_KSEG_PADDR) + \
			 PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET	PLATFORM_DEFAULT_MEM_START
#define PHYS_OFFSET	PLATFORM_DEFAULT_MEM_START
#define MAX_LOW_PFN	PHYS_PFN(0xfffffffful)
#endif

#define PGTABLE_START	0x80000000

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is then wider than the page
 * offset:
 *
 * |      |cache| cache index
 * | pfn  | off | virtual address
 * |xxxx:X| zzz |
 * |    : |     |
 * | \  / |     |
 * |trans.|     |
 * | /  \ |     |
 * |yyyy:Y| zzz | physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by these bits) remains the same when a page is allocated or
 * remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
# define DCACHE_ALIAS(a)	((void)(a), 0)
#endif
#define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)
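
/*
 * Worked example (hypothetical configuration, for illustration only):
 * with 4 KiB pages and an 8 KiB cache way (DCACHE_WAY_SHIFT == 13),
 * DCACHE_ALIAS_ORDER is 1, DCACHE_N_COLORS is 2 and DCACHE_ALIAS_MASK
 * is 0x1000, so the page color is simply bit 12 of the address:
 * DCACHE_ALIAS_EQ(0x2000, 0x4000) is true (same color), while
 * DCACHE_ALIAS_EQ(0x2000, 0x5000) is false (different colors).
 */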

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif


#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/*
 * Pure 2^n version of get_order
 * Use the 'nsau' instruction if the processor supports it,
 * otherwise fall back to the generic version.
 */

#if XCHAL_HAVE_NSA

static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}
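
/*
 * Sanity check of the nsau-based path (nsau returns the number of leading
 * zero bits of its operand, and 32 when the operand is 0):
 * get_order(1) evaluates nsau((1 - 1) >> PAGE_SHIFT) == nsau(0) == 32,
 * giving order 0, and get_order(2 * PAGE_SIZE) evaluates nsau(1) == 31,
 * giving order 1, matching the generic get_order().
 */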

#else

# include <asm-generic/getorder.h>

#endif

struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work
 */

#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
			    unsigned long to_paddr, unsigned long from_paddr);

#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif

/*
 * This handles the memory map.  We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
 * These macros are for conversion of kernel addresses, not user
 * addresses.
 */

#define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

#define __pa(x)	\
	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x)	\
	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
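
/*
 * Example of the linear mapping (assuming a common default configuration
 * where XCHAL_KSEG_CACHED_VADDR is 0xd0000000 and XCHAL_KSEG_PADDR is 0;
 * both are core-configuration dependent): __pa(0xd0001000) yields
 * 0x00001000 and __va(0x00001000) yields 0xd0001000, i.e. KSEG is a
 * simple offset mapping between kernel virtual and physical addresses.
 */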

#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */