/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */
#ifndef __ASM_ARC_PAGE_H
#define __ASM_ARC_PAGE_H

#include <uapi/asm/page.h>

#ifndef __ASSEMBLY__

#define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
#define copy_page(to, from)		memcpy((to), (from), PAGE_SIZE)

struct vm_area_struct;
struct page;

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma);
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These wrappers exist to make use of C type-checking (see the example
 * after this #ifdef/#else block).
 */
typedef struct {
#ifdef CONFIG_ARC_HAS_PAE40
	unsigned long long pte;
#else
	unsigned long pte;
#endif
} pte_t;
typedef struct {
	unsigned long pgd;
} pgd_t;
typedef struct {
	unsigned long pgprot;
} pgprot_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })

#define pte_pgprot(x)	__pgprot(pte_val(x))

#else /* !STRICT_MM_TYPECHECKS */

#ifdef CONFIG_ARC_HAS_PAE40
typedef unsigned long long pte_t;
#else
typedef unsigned long pte_t;
#endif
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)
#define __pte(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)
#define pte_pgprot(x)	(x)

#endif
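
/*
 * Example (illustrative only, not part of the kernel API): with
 * STRICT_MM_TYPECHECKS the page table types are distinct structs, so the
 * compiler rejects accidental mixing of wrapped values and raw scalars:
 *
 *	pte_t pte = __pte(0x80001000UL);
 *	unsigned long bad = pte;		--> compile error (pte_t is a struct)
 *	unsigned long ok  = pte_val(pte);	--> fine in either mode
 *
 * With the plain scalar typedefs (the default here, since
 * STRICT_MM_TYPECHECKS is #undef'd above) both assignments compile.
 */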

typedef pte_t * pgtable_t;

/*
 * Use virt_to_pfn() with caution:
 * if used in pte or paddr related macros, it could cause truncation
 * in PAE40 builds.
 * As a rule of thumb, only use it in helpers starting with virt_.
 * You have been warned!
 */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
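
/*
 * Rationale for the caution above (illustrative): __pa() returns an
 * unsigned long, which is 32 bit on ARC, while PAE40 physical addresses
 * are 40 bit wide. A pte/paddr helper routed through virt_to_pfn() would
 * therefore silently lose bits [39:32]. Helpers that start from a kernel
 * virtual address (virt_*) never see such high addresses, so e.g.
 *
 *	virt_to_page(kaddr)	--> pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 *
 * stays within the 32 bit range and is safe.
 */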

/*
 * When HIGHMEM is enabled we have holes in the memory map so we need
 * pfn_valid() that takes into account the actual extents of the physical
 * memory
 */
#ifdef CONFIG_HIGHMEM

extern unsigned long arch_pfn_offset;
#define ARCH_PFN_OFFSET		arch_pfn_offset

extern int pfn_valid(unsigned long pfn);
#define pfn_valid		pfn_valid

#else /* CONFIG_HIGHMEM */

#define ARCH_PFN_OFFSET		virt_to_pfn(CONFIG_LINUX_RAM_BASE)
#define pfn_valid(pfn)		(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)

#endif /* CONFIG_HIGHMEM */
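
/*
 * Note on the !HIGHMEM pfn_valid() above (assuming max_mapnr counts the
 * pages starting at ARCH_PFN_OFFSET): the subtraction is unsigned, so a
 * pfn below ARCH_PFN_OFFSET wraps to a huge value and fails the
 * < max_mapnr test, i.e. a single compare covers both bounds:
 *
 *	pfn_valid(pfn) == (ARCH_PFN_OFFSET <= pfn &&
 *			   pfn < ARCH_PFN_OFFSET + max_mapnr)
 */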

/*
 * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
 *
 * These macros have historically been misnamed:
 * "virt" here means the link-address/program-address as embedded in the
 * object code, and for ARC, link-address == physical address.
 */
#define __pa(vaddr)  ((unsigned long)(vaddr))
#define __va(paddr)  ((void *)((unsigned long)(paddr)))

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
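
/*
 * For illustration: since the kernel's link address equals its physical
 * address on ARC, __pa()/__va() are plain casts and the helpers above
 * compose into simple shifts, e.g. virt_to_page(kaddr) expands to
 *
 *	pfn_to_page(((unsigned long)(kaddr)) >> PAGE_SHIFT)
 */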

/* Default permissions for stack/heap pages (non-executable) */
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#define WANT_PAGE_VIRTUAL   1

#include <asm-generic/memory_model.h>	/* page_to_pfn, pfn_to_page */
#include <asm-generic/getorder.h>

#endif /* !__ASSEMBLY__ */

#endif