/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary.  This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses.  Thus, we need to be careful not to let the user
 * trick us into accessing kernel memory that would normally be
 * inaccessible.  This code is also fairly performance sensitive,
 * so we want to spend as little time doing safety checks as
 * possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped.  The get_fs() macro tells us what
 * to do: if get_fs()==USER_DS, checking is performed, if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it.  This is handled by filling out an
 * exception handler fixup entry for each instruction that has the
 * potential to fault.  When such a fault occurs, the page fault
 * handler checks to see whether the faulting instruction has a fixup
 * associated with it and, if so, sets r8 to -EFAULT, clears r9, and
 * then resumes execution at the continuation point.
 *
 * Based on <asm-alpha/uaccess.h>.
 *
 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/compiler.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/extable.h>

/*
 * For historical reasons, the following macros are grossly misnamed:
 */
#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)
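
/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern for letting kernel code pass kernel pointers through the
 * user-access routines is to temporarily widen the address limit.
 * "some_uaccess_helper" is a hypothetical function name.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);		// bypass the USER_DS range check
 *	err = some_uaccess_helper();	// may now operate on kernel addresses
 *	set_fs(old_fs);			// always restore the previous limit
 */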

/*
 * When accessing user memory, we need to make sure the entire area really is in
 * user-level space.  In order to do this efficiently, we make sure that the page at
 * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
 * point inside the virtually mapped linear page table.
 */
static inline int __access_ok(const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	unsigned long seg = get_fs().seg;
	return likely(addr <= seg) &&
		(seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
}
#define access_ok(type, addr, size)	__access_ok((addr), (size))
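
/*
 * Illustrative sketch (not part of the original header): validating a
 * user buffer once up front before a series of unchecked accesses.
 * "ubuf" and "len" are hypothetical names.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	// ... __put_user() may now be used repeatedly on ubuf ...
 */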

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))
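
/*
 * Illustrative sketch (not part of the original header): typical use in
 * an ioctl handler; "argp" is a hypothetical void __user * argument.
 * Both macros return 0 on success and -EFAULT on a faulting access.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)argp))
 *		return -EFAULT;
 *	val *= 2;
 *	if (put_user(val, (int __user *)argp))
 *		return -EFAULT;
 */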

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
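
/*
 * Illustrative sketch (not part of the original header): one explicit
 * range check followed by several unchecked fetches; "uarg" is a
 * hypothetical int __user * covering two elements.
 *
 *	int a, b;
 *
 *	if (!access_ok(VERIFY_READ, uarg, 2 * sizeof(int)))
 *		return -EFAULT;
 *	if (__get_user(a, &uarg[0]) || __get_user(b, &uarg[1]))
 *		return -EFAULT;
 */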

#ifdef ASM_SUPPORTED
  struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata.  */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

# define __get_user_size(val, addr, n, err)					\
do {										\
	register long __gu_r8 asm ("r8") = 0;					\
	register long __gu_r9 asm ("r9");					\
	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"				\
	     "[1:]"								\
	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));	\
	(err) = __gu_r8;							\
	(val) = __gu_r9;							\
} while (0)

/*
 * The "__put_user_size()" macro tells gcc that it reads from memory instead of
 * writing to it.  This is safe because the stores do not write to any memory gcc
 * knows about, so there are no aliasing issues.
 */
# define __put_user_size(val, addr, n, err)					\
do {										\
	register long __pu_r8 asm ("r8") = 0;					\
	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"			\
		      "[1:]"							\
		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \
	(err) = __pu_r8;							\
} while (0)

#else /* !ASM_SUPPORTED */
# define RELOC_TYPE	2	/* ip-rel */
# define __get_user_size(val, addr, n, err)				\
do {									\
	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
	(err) = ia64_getreg(_IA64_REG_R8);				\
	(val) = ia64_getreg(_IA64_REG_R9);				\
} while (0)
# define __put_user_size(val, addr, n, err)				\
do {									\
	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE,	\
		  (__force unsigned long) (val));			\
	(err) = ia64_getreg(_IA64_REG_R8);				\
} while (0)
#endif /* !ASM_SUPPORTED */

extern void __get_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
 * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate them while
 * using r8/r9.
 */
#define __do_get_user(check, x, ptr, size)					\
({										\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);			\
	__typeof__ (size) __gu_size = (size);					\
	long __gu_err = -EFAULT;						\
	unsigned long __gu_val = 0;						\
	if (!check || __access_ok(__gu_ptr, __gu_size))				\
		switch (__gu_size) {						\
		case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
		case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
		case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break; \
		case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
		default: __get_user_unknown(); break;				\
		}								\
	(x) = (__force __typeof__(*(__gu_ptr))) __gu_val;			\
	__gu_err;								\
})

#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size)
#define __get_user_check(x, ptr, size)		__do_get_user(1, x, ptr, size)

extern void __put_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
 * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
 */
#define __do_put_user(check, x, ptr, size)					\
({										\
	__typeof__ (x) __pu_x = (x);						\
	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);				\
	__typeof__ (size) __pu_size = (size);					\
	long __pu_err = -EFAULT;						\
										\
	if (!check || __access_ok(__pu_ptr, __pu_size))				\
		switch (__pu_size) {						\
		case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;	\
		case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;	\
		case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;	\
		case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;	\
		default: __put_user_unknown(); break;				\
		}								\
	__pu_err;								\
})

#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size)
#define __put_user_check(x, ptr, size)		__do_put_user(1, x, ptr, size)

/*
 * Complex access routines
 */
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
					       unsigned long count);

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long count)
{
	return __copy_user(to, (__force void __user *) from, count);
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long count)
{
	return __copy_user((__force void __user *) to, from, count);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
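
/*
 * Illustrative sketch (not part of the original header): raw_copy_from_user()
 * backs the generic copy_from_user(), which callers use like this; "karg"
 * and "uarg" are hypothetical kernel/user pointers to the same struct type.
 * The copy routines return the number of bytes left uncopied, so nonzero
 * means a fault occurred.
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */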

extern unsigned long __do_clear_user (void __user *, unsigned long);

#define __clear_user(to, n)	__do_clear_user(to, n)

#define clear_user(to, n)					\
({								\
	unsigned long __cu_len = (n);				\
	if (__access_ok(to, __cu_len))				\
		__cu_len = __do_clear_user(to, __cu_len);	\
	__cu_len;						\
})
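
/*
 * Illustrative sketch (not part of the original header): clear_user()
 * returns the number of bytes that could NOT be cleared, so a nonzero
 * result means a fault; "ubuf" and "len" are hypothetical.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */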

/*
 * Returns: -EFAULT if an exception occurs before the terminator is reached,
 * N if the entire buffer is filled, else strlen.
 */
extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);

#define strncpy_from_user(to, from, n)					\
({									\
	const char __user * __sfu_from = (from);			\
	long __sfu_ret = -EFAULT;					\
	if (__access_ok(__sfu_from, 0))					\
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
	__sfu_ret;							\
})
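
/*
 * Illustrative sketch (not part of the original header): copying a
 * user-supplied name into a fixed kernel buffer; "uname" is a
 * hypothetical const char __user * argument.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;		// -EFAULT
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	// buffer filled, no NUL seen
 */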

/*
 * Returns: 0 if exception before NUL or reaching the supplied limit
 * (N), a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char __user *, long);

#define strnlen_user(str, len)					\
({								\
	const char __user *__su_str = (str);			\
	unsigned long __su_ret = 0;				\
	if (__access_ok(__su_str, 0))				\
		__su_ret = __strnlen_user(__su_str, len);	\
	__su_ret;						\
})
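
/*
 * Illustrative sketch (not part of the original header): sizing a user
 * string before copying it; "ustr" is hypothetical.  Per the comment
 * above, 0 means a fault and a value above the limit means the string
 * was too long.
 *
 *	unsigned long n = strnlen_user(ustr, PATH_MAX);
 *
 *	if (n == 0)
 *		return -EFAULT;		// faulted before the NUL
 *	if (n > PATH_MAX)
 *		return -ENAMETOOLONG;	// limit would be exceeded
 */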

#define ARCH_HAS_TRANSLATE_MEM_PTR	1
static __inline__ void *
xlate_dev_mem_ptr(phys_addr_t p)
{
	struct page *page;
	void *ptr;

	page = pfn_to_page(p >> PAGE_SHIFT);
	if (PageUncached(page))
		ptr = (void *)p + __IA64_UNCACHED_OFFSET;
	else
		ptr = __va(p);

	return ptr;
}
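
/*
 * Illustrative sketch (not part of the original header): the /dev/mem
 * read path (see drivers/char/mem.c) uses this translation roughly like
 * so, picking the uncached alias when the page is uncached.
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (copy_to_user(buf, ptr, sz))
 *		return -EFAULT;
 */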

/*
 * Convert a virtual cached kernel memory pointer to an uncached pointer
 */
static __inline__ void *
xlate_dev_kmem_ptr(void *p)
{
	struct page *page;
	void *ptr;

	page = virt_to_page((unsigned long)p);
	if (PageUncached(page))
		ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
	else
		ptr = p;

	return ptr;
}

#endif /* _ASM_IA64_UACCESS_H */