/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })


#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)
#define segment_eq(a,b)	((a).ar4 == (b).ar4)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.mm_segment = fs;
	if (segment_eq(fs, KERNEL_DS)) {
		set_cpu_flag(CIF_ASCE_SECONDARY);
		__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	} else {
		clear_cpu_flag(CIF_ASCE_SECONDARY);
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
}

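/*
 * Illustrative sketch, not part of the original header: code that needs a
 * helper taking a __user pointer to operate on a kernel buffer traditionally
 * switches the address space limit around the call and always restores the
 * previous value afterwards, e.g.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = vfs_read(file, (char __user *)kbuf, len, &pos);
 *	set_fs(old_fs);
 *
 * vfs_read() and the surrounding variables are only an assumed caller-side
 * example; nothing in this file depends on them.
 */
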
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(type, addr, size) __access_ok(addr, size)

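/*
 * Illustrative sketch, not part of the original header: in the generic
 * uaccess contract a caller validates a user range once with access_ok()
 * and may then use the "less checking" __copy variants, e.g.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *
 * On s390, user and kernel addresses live in separate address spaces, so
 * __range_ok() unconditionally returns 1 and access_ok() never fails here;
 * faulting accesses are caught via the exception table instead.
 */
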
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_RELATIVE_EXTABLE

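/*
 * Background note, not part of the original header: with
 * ARCH_HAS_RELATIVE_EXTABLE the insn and fixup members hold signed 32-bit
 * offsets relative to their own addresses rather than absolute pointers,
 * which is why extable_fixup() adds x->fixup to &x->fixup.  A fault handler
 * would typically resolve a faulting instruction roughly like this (an
 * assumed caller; the real path lives in arch/s390/mm/fault.c):
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(regs->psw.addr);
 *	if (fixup)
 *		regs->psw.addr = extable_fixup(fixup);
 */
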
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					    unsigned long n);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					  unsigned long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

#define __put_get_user_asm(to, from, size, spec)		\
({								\
	register unsigned long __reg0 asm("0") = spec;		\
	int __rc;						\
								\
	asm volatile(						\
		"0: mvcos  %1,%3,%2\n"				\
		"1: xr     %0,%0\n"				\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi    %0,%5\n"				\
		"   jg     2b\n"				\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "+Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
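
/*
 * Background note, not part of the original header: the spec value loaded
 * into register 0 is the MVCOS operand-access control.  0x810000UL (used by
 * __put_user_fn() below) marks the first operand, the store destination, as
 * accessed through the secondary address space, while 0x81UL (used by
 * __get_user_fn()) does the same for the second operand, the fetch source.
 * With get_fs() == USER_DS the secondary ASCE is the user address space,
 * see set_fs() above.
 */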

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x810000UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char __user *)ptr,
					(unsigned char *)x,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short __user *)ptr,
					(unsigned short *)x,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int __user *)ptr,
					(unsigned int *)x,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long __user *)ptr,
					(unsigned long *)x,
					size, spec);
		break;
	}
	return rc;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x81UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char *)x,
					(unsigned char __user *)ptr,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short *)x,
					(unsigned short __user *)ptr,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int *)x,
					(unsigned int __user *)ptr,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long *)x,
					(unsigned long __user *)ptr,
					size, spec);
		break;
	}
	return rc;
}

#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})


int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

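/*
 * Illustrative sketch, not part of the original header: get_user() and
 * put_user() infer the transfer size from the pointer type and evaluate to
 * 0 on success or -EFAULT on failure, so a typical caller (names below are
 * hypothetical) looks like
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * The __get_user()/__put_user() variants omit the might_fault() annotation
 * and are intended for callers that have already validated the access.
 */
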
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user(to, from, n);
}

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		if (!__builtin_constant_p(n))
			copy_user_overflow(sz, n);
		else
			__bad_copy_user();
		return n;
	}
	return __copy_from_user(to, from, n);
}

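/*
 * Illustrative sketch, not part of the original header: both helpers return
 * the number of bytes that could NOT be copied, so the usual calling pattern
 * (struct and variable names below are hypothetical) is
 *
 *	struct my_args kargs;
 *
 *	if (copy_from_user(&kargs, uargs, sizeof(kargs)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(uargs, &kargs, sizeof(kargs)))
 *		return -EFAULT;
 *
 * The __compiletime_object_size() check in copy_from_user() turns an
 * overflow of a kernel object of known size into a build error when n is
 * constant, or a runtime warning plus a rejected copy when it is not.
 */
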
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}

/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}

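/*
 * Illustrative sketch, not part of the original header: strncpy_from_user()
 * returns the length of the copied string (excluding the terminating NUL)
 * on success and a negative value such as -EFAULT if the source could not
 * be accessed, so a hypothetical caller checks for a negative result:
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 */
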
unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

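/*
 * Illustrative sketch, not part of the original header: like the copy
 * helpers, clear_user() returns the number of bytes that could not be
 * zeroed (0 on success), so callers typically treat any non-zero result
 * as -EFAULT:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
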
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void s390_kernel_write(void *dst, const void *src, size_t size);

#endif /* __S390_UACCESS_H */