blob: ce550d06abc367f5be272a0eeeedac18d23ccdf0 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 * S390 version
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 1999, 2000
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 * Author(s): Hartmut Penner (hp@de.ibm.com),
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 *
8 * Derived from "include/asm-i386/uaccess.h"
9 */
10#ifndef __S390_UACCESS_H
11#define __S390_UACCESS_H
12
13/*
14 * User space memory access functions
15 */
Heiko Carstensb5a882f2017-02-17 08:13:28 +010016#include <asm/processor.h>
David Howellsa0616cd2012-03-28 18:30:02 +010017#include <asm/ctl_reg.h>
Al Viroe70f1d52016-12-26 00:48:37 -050018#include <asm/extable.h>
Martin Schwidefsky0aaba412017-08-22 12:08:22 +020019#include <asm/facility.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020
/*
 * Check the address-space-control setup when crossing the kernel/user
 * boundary; @exit presumably distinguishes the exit path from the
 * entry path - NOTE(review): confirm at the call sites, only the
 * prototype is visible here.
 */
void debug_user_asce(int exit);

/*
 * Every (addr, size) pair is accepted: no up-front range check is done
 * here.  Presumably faults are caught at access time by the uaccess
 * primitives below instead - NOTE(review): not provable from this
 * header alone.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
27
/*
 * __access_ok() only performs the static __user pointer annotation
 * check via __chk_user_ptr(); the runtime part, __range_ok(), always
 * succeeds (see above).
 */
#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(addr, size) __access_ok(addr, size)
Linus Torvalds1da177e2005-04-16 15:20:36 -070035
/*
 * Unchecked bulk copy primitives.  A nonzero return value indicates an
 * incomplete copy (conventionally the number of bytes left uncopied;
 * the callers below only test for nonzero).
 */
unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

/*
 * With KASAN the inline copy variants are disabled - presumably so the
 * accesses go through the instrumented out-of-line implementations.
 */
#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif

/* Referenced for unsupported transfer sizes; declared noreturn. */
int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));
49
Heiko Carstensc9ca7842014-04-17 14:16:03 +020050#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
51
/*
 * Transfer a single value of @size bytes between kernel and user space
 * with the MVCOS instruction.  @insn first loads the operand-space
 * specification %[spec] (0x81) into register 0; put and get callers
 * pass different instructions ("llilh" vs "lghi") to place the
 * specification in different halves of register 0, presumably
 * selecting which MVCOS operand is the user space one - NOTE(review):
 * confirm against the MVCOS definition in the Principles of Operation.
 *
 * On success %[rc] is cleared at 1:; on a fault the exception table
 * diverts to 3:, which sets %[rc] to -EFAULT and jumps past the
 * success path.  Register 0 and the condition code are clobbered.
 */
#define __put_get_user_asm(to, from, size, insn)		\
({								\
	int __rc;						\
								\
	asm volatile(						\
		insn " 0,%[spec]\n"				\
		"0: mvcos %[_to],%[_from],%[_size]\n"		\
		"1: xr %[rc],%[rc]\n"				\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi %[rc],%[retval]\n"			\
		" jg 2b\n"					\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))		\
		: [_size] "d" (size), [_from] "Q" (*(from)),	\
		  [retval] "K" (-EFAULT), [spec] "K" (0x81UL)	\
		: "cc", "0");					\
	__rc;							\
})
72
/*
 * Store a value of @size bytes (1, 2, 4 or 8) at the user space
 * address @ptr; @x points to the kernel space source.
 * Returns 0 on success or -EFAULT if the access faulted.
 * "llilh" places the MVCOS specification for the put direction
 * (user space destination) into register 0.
 * NOTE(review): on the default path rc is returned uninitialized;
 * this is unreachable for the sizes the __put_user() macro passes,
 * and __put_user_bad() is declared noreturn.
 */
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char __user *)ptr,
					(unsigned char *)x,
					size, "llilh");
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short __user *)ptr,
					(unsigned short *)x,
					size, "llilh");
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int __user *)ptr,
					(unsigned int *)x,
					size, "llilh");
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long __user *)ptr,
					(unsigned long *)x,
					size, "llilh");
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}
104
/*
 * Fetch a value of @size bytes (1, 2, 4 or 8) from the user space
 * address @ptr into the kernel buffer @x.
 * Returns 0 on success or -EFAULT if the access faulted.
 * "lghi" places the MVCOS specification for the get direction
 * (user space source) into register 0.
 * NOTE(review): as in __put_user_fn(), rc is returned uninitialized
 * on the (unreachable) default path.
 */
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char *)x,
					(unsigned char __user *)ptr,
					size, "lghi");
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short *)x,
					(unsigned short __user *)ptr,
					size, "lghi");
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int *)x,
					(unsigned int __user *)ptr,
					size, "lghi");
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long *)x,
					(unsigned long __user *)ptr,
					size, "lghi");
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}
Heiko Carstensc9ca7842014-04-17 14:16:03 +0200136
137#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
138
Heiko Carstens211deca2014-01-24 12:51:27 +0100139static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200140{
Al Viro37096002017-03-28 15:06:24 -0400141 size = raw_copy_to_user(ptr, x, size);
Heiko Carstens4f41c2b2014-01-23 11:18:36 +0100142 return size ? -EFAULT : 0;
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200143}
144
Heiko Carstens211deca2014-01-24 12:51:27 +0100145static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200146{
Al Viro37096002017-03-28 15:06:24 -0400147 size = raw_copy_from_user(x, ptr, size);
Heiko Carstens4f41c2b2014-01-23 11:18:36 +0100148 return size ? -EFAULT : 0;
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200149}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150
Heiko Carstensc9ca7842014-04-17 14:16:03 +0200151#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
152
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 * Evaluates to 0 on success or -EFAULT on fault; sizes other than
 * 1/2/4/8 are rejected via __put_user_bad().
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176
/* Same as __put_user(), preceded by the might_fault() annotation. */
#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})
182
183
/*
 * Fetch *ptr from user space into x.  The per-size temporary is zero
 * initialized so x holds a defined value even if the access faults;
 * the __force cast then reinterprets the raw bytes as the pointer's
 * original type.  Evaluates to 0 on success or -EFAULT on fault.
 * NOTE(review): the "};" after each case block carries a stray
 * semicolon (harmless empty statement).
 */
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
/* Same as __get_user(), preceded by the might_fault() annotation. */
#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})
229
/*
 * Copy a null terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

/* Determine the length of a user space string, bounded by @count. */
long __must_check strnlen_user(const char __user *src, long count);

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241
/*
 * Zero @n bytes of user memory at @to; forwards __clear_user()'s
 * return value unchanged (presumably the number of bytes not cleared,
 * mirroring the raw_copy_* convention - confirm in the implementation).
 */
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
247
/* Implemented in arch code; only the prototypes are visible here. */
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void *s390_kernel_write(void *dst, const void *src, size_t size);

/* This architecture provides __{get,put}_kernel_nofault() below. */
#define HAVE_GET_KERNEL_NOFAULT

/* Referenced for unsupported __put_kernel_nofault() sizes. */
int __noreturn __put_kernel_bad(void);
254
/*
 * Fault-tolerant single store to a kernel address: @insn stores @val
 * (%2) into *(to) (%1).  On success the return code is cleared at 1:;
 * if either instruction faults, the exception table diverts to 3:,
 * which sets the return code to -EFAULT and jumps back past the
 * success path.
 */
#define __put_kernel_asm(val, to, insn)				\
({								\
	int __rc;						\
								\
	asm volatile(						\
		"0: " insn " %2,%1\n"				\
		"1: xr %0,%0\n"					\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi %0,%3\n"				\
		" jg 2b\n"					\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "+Q" (*(to))			\
		: "d" (val), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
273
/*
 * Store *(type *)src to dst (both kernel pointers) without oopsing on
 * a bad dst: on a fault control branches to @err_label instead.  The
 * value is widened to u64 up front and the store instruction is chosen
 * by sizeof(type); other sizes are rejected via __put_kernel_bad().
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	u64 __x = (u64)(*((type *)(src)));				\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
299
/* Referenced for unsupported __get_kernel_nofault() sizes. */
int __noreturn __get_kernel_bad(void);

/*
 * Fault-tolerant single load from a kernel address: @insn loads from
 * *(from) (%2) into @val (%1).  On success the return code is cleared
 * at 1:; on a fault the fixup at 3: sets it to -EFAULT and jumps back
 * past the success path.
 */
#define __get_kernel_asm(val, from, insn)			\
({								\
	int __rc;						\
								\
	asm volatile(						\
		"0: " insn " %1,%2\n"				\
		"1: xr %0,%0\n"					\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi %0,%3\n"				\
		" jg 2b\n"					\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "+d" (val)			\
		: "Q" (*(from)), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
320
/*
 * Load *(type *)src into *(type *)dst (both kernel pointers) without
 * oopsing on a bad src: on a fault control branches to @err_label.
 * Each size uses a zero initialized temporary of the matching width
 * and the corresponding load instruction; other sizes are rejected
 * via __get_kernel_bad().
 * NOTE(review): the "};" after each case block carries a stray
 * semicolon (harmless empty statement), as in __get_user().
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		u8 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 2: {							\
		u16 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 4: {							\
		u32 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 8: {							\
		u64 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
361
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362#endif /* __S390_UACCESS_H */