blob: d74e26b48604d720b5b776057320b3390b10ee3b [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 * S390 version
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 1999, 2000
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 * Author(s): Hartmut Penner (hp@de.ibm.com),
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 *
8 * Derived from "include/asm-i386/uaccess.h"
9 */
10#ifndef __S390_UACCESS_H
11#define __S390_UACCESS_H
12
13/*
14 * User space memory access functions
15 */
Heiko Carstensb5a882f2017-02-17 08:13:28 +010016#include <asm/processor.h>
David Howellsa0616cd2012-03-28 18:30:02 +010017#include <asm/ctl_reg.h>
Al Viroe70f1d52016-12-26 00:48:37 -050018#include <asm/extable.h>
Martin Schwidefsky0aaba412017-08-22 12:08:22 +020019#include <asm/facility.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020
Sven Schnelle56e62a72020-11-21 11:14:56 +010021void debug_user_asce(int exit);
Heiko Carstens062e5272020-11-16 08:06:41 +010022
/*
 * On s390 user space is accessed through a separate address space
 * (see the MVCOS-based accessors below), so kernel and user addresses
 * cannot collide and every (addr, size) combination is acceptable:
 * this check is unconditionally true.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
27
/*
 * Sparse-check the pointer annotation, then delegate to __range_ok()
 * (which always succeeds on s390, see above).
 */
#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
Linus Torvalds96d4f262019-01-03 18:57:57 -080034#define access_ok(addr, size) __access_ok(addr, size)
Linus Torvalds1da177e2005-04-16 15:20:36 -070035
Al Viro37096002017-03-28 15:06:24 -040036unsigned long __must_check
37raw_copy_from_user(void *to, const void __user *from, unsigned long n);
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
Al Viro37096002017-03-28 15:06:24 -040039unsigned long __must_check
40raw_copy_to_user(void __user *to, const void *from, unsigned long n);
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
Vasily Gorbik01eb42a2019-04-23 15:36:36 +020042#ifndef CONFIG_KASAN
Al Viro37096002017-03-28 15:06:24 -040043#define INLINE_COPY_FROM_USER
44#define INLINE_COPY_TO_USER
Vasily Gorbik01eb42a2019-04-23 15:36:36 +020045#endif
Gerald Schaefer6c1e3e72009-12-07 12:51:47 +010046
Heiko Carstensdb527392020-10-08 16:28:15 +020047int __put_user_bad(void) __attribute__((noreturn));
48int __get_user_bad(void) __attribute__((noreturn));
49
/*
 * Operand-access control (OAC) word for the MVCOS instruction; loaded
 * into general register 0 before the copy.  oac1 controls the first
 * (destination) operand, oac2 the second (source) operand.  Per operand:
 *   key: storage access key to use
 *   as:  address-space control (e.g. PSW_BITS_AS_SECONDARY for user space)
 *   k:   honour the key field for this operand
 *   a:   honour the as field for this operand
 */
union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac1;
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac2;
	};
};
71
Heiko Carstens3d787b32022-01-22 10:24:31 +010072#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
73
/*
 * Copy @size bytes between kernel and user space with a single MVCOS
 * (MOVE WITH OPTIONAL SPECIFICATIONS) instruction.  @oac_spec is loaded
 * into register 0 and selects which operand is accessed in the secondary
 * (user) address space.  A fault on the MVCOS (or on the following
 * instruction) is fixed up via the exception table to return -EFAULT;
 * on success the result is zeroed with xr.  Evaluates to 0 or -EFAULT.
 */
#define __put_get_user_asm(to, from, size, oac_spec)			\
({									\
	int __rc;							\
									\
	asm volatile(							\
		" lr 0,%[spec]\n"					\
		"0: mvcos %[_to],%[_from],%[_size]\n"			\
		"1: xr %[rc],%[rc]\n"					\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3: lhi %[rc],%[retval]\n"				\
		" jg 2b\n"						\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [retval] "K" (-EFAULT), [spec] "d" (oac_spec.val)	\
		: "cc", "0");						\
	__rc;								\
})
94
/*
 * Store to user space: mark the first (destination) operand of MVCOS
 * as residing in the secondary (user) address space.
 */
#define __put_user_asm(to, from, size)				\
	__put_get_user_asm(to, from, size, ((union oac) {	\
		.oac1.as = PSW_BITS_AS_SECONDARY,		\
		.oac1.a = 1					\
	}))
100
/*
 * Fetch from user space: mark the second (source) operand of MVCOS
 * as residing in the secondary (user) address space.
 *
 * Fix: dropped the stray trailing line-continuation backslash after
 * the closing "}))" — it silently made the following (blank) line part
 * of the macro definition and would swallow any code later added there.
 */
#define __get_user_asm(to, from, size)				\
	__put_get_user_asm(to, from, size, ((union oac) {	\
		.oac2.as = PSW_BITS_AS_SECONDARY,		\
		.oac2.a = 1					\
	}))
/*
 * Write a 1/2/4/8 byte value from kernel space (*x) to user space (ptr)
 * with MVCOS.  Returns 0 on success or -EFAULT on fault.  Any other
 * size is rejected at link time through __put_user_bad() (noreturn, so
 * rc cannot be used uninitialized on the default path).
 */
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_user_asm((unsigned char __user *)ptr,
				    (unsigned char *)x,
				    size);
		break;
	case 2:
		rc = __put_user_asm((unsigned short __user *)ptr,
				    (unsigned short *)x,
				    size);
		break;
	case 4:
		rc = __put_user_asm((unsigned int __user *)ptr,
				    (unsigned int *)x,
				    size);
		break;
	case 8:
		rc = __put_user_asm((unsigned long __user *)ptr,
				    (unsigned long *)x,
				    size);
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}
138
/*
 * Read a 1/2/4/8 byte value from user space (ptr) into kernel space (*x)
 * with MVCOS.  Returns 0 on success or -EFAULT on fault.  Any other
 * size is rejected at link time through __get_user_bad() (noreturn, so
 * rc cannot be used uninitialized on the default path).
 */
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __get_user_asm((unsigned char *)x,
				    (unsigned char __user *)ptr,
				    size);
		break;
	case 2:
		rc = __get_user_asm((unsigned short *)x,
				    (unsigned short __user *)ptr,
				    size);
		break;
	case 4:
		rc = __get_user_asm((unsigned int *)x,
				    (unsigned int __user *)ptr,
				    size);
		break;
	case 8:
		rc = __get_user_asm((unsigned long *)x,
				    (unsigned long __user *)ptr,
				    size);
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}
Heiko Carstensc9ca7842014-04-17 14:16:03 +0200170
171#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
172
Heiko Carstens211deca2014-01-24 12:51:27 +0100173static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200174{
Al Viro37096002017-03-28 15:06:24 -0400175 size = raw_copy_to_user(ptr, x, size);
Heiko Carstens4f41c2b2014-01-23 11:18:36 +0100176 return size ? -EFAULT : 0;
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200177}
178
Heiko Carstens211deca2014-01-24 12:51:27 +0100179static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200180{
Al Viro37096002017-03-28 15:06:24 -0400181 size = raw_copy_from_user(x, ptr, size);
Heiko Carstens4f41c2b2014-01-23 11:18:36 +0100182 return size ? -EFAULT : 0;
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200183}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184
Heiko Carstensc9ca7842014-04-17 14:16:03 +0200185#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
186
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187/*
188 * These are the main single-value transfer routines. They automatically
189 * use the right size if we just have the right pointer type.
190 */
/*
 * Store a single value to user space without access_ok() checking.
 * (x) is first copied into a correctly-typed local so it is evaluated
 * exactly once.  Evaluates to 0 on success or -EFAULT; unsupported
 * sizes cause a link-time error via __put_user_bad().
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __pu_err = -EFAULT;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		__pu_err = __put_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		break;							\
	default:							\
		__put_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__pu_err, 0);					\
})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210
/*
 * Checked single-value store to user space.  May fault and sleep,
 * hence the might_fault() annotation for atomic-context debugging.
 */
#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})
216
217
/*
 * Fetch a single value from user space without access_ok() checking.
 * The value is read into a zero-initialized temporary of matching width
 * so that (x) is fully assigned even when the access faults, then cast
 * back to the target type.  Evaluates to 0 on success or -EFAULT;
 * unsupported sizes cause a link-time error via __get_user_bad().
 *
 * Fix: removed the spurious null-statement semicolons that followed the
 * brace-closed case blocks ("};") — harmless, but a checkpatch-flagged
 * style defect.
 */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = -EFAULT;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	case 2: {							\
		unsigned short __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	case 4: {							\
		unsigned int __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	case 8: {							\
		unsigned long long __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__gu_err, 0);					\
})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257
/*
 * Checked single-value fetch from user space.  May fault and sleep,
 * hence the might_fault() annotation for atomic-context debugging.
 */
#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
263
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264/*
265 * Copy a null terminated string from userspace.
266 */
Heiko Carstense93a1cb2021-07-22 22:07:30 +0200267long __must_check strncpy_from_user(char *dst, const char __user *src, long count);
Heiko Carstens4f41c2b2014-01-23 11:18:36 +0100268
Heiko Carstense93a1cb2021-07-22 22:07:30 +0200269long __must_check strnlen_user(const char __user *src, long count);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271/*
272 * Zero Userspace
273 */
Heiko Carstens211deca2014-01-24 12:51:27 +0100274unsigned long __must_check __clear_user(void __user *to, unsigned long size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275
/*
 * Zero @n bytes of user space at @to.  Returns __clear_user()'s result
 * (presumably the number of bytes not cleared, 0 on full success —
 * matching the generic clear_user contract; defined elsewhere).
 * May fault, hence might_fault().
 */
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
281
Heiko Carstens211deca2014-01-24 12:51:27 +0100282int copy_to_user_real(void __user *dest, void *src, unsigned long count);
Josh Poimboeufcb2ccea2020-04-29 10:24:47 -0500283void *s390_kernel_write(void *dst, const void *src, size_t size);
David Howellsa0616cd2012-03-28 18:30:02 +0100284
Heiko Carstens110a6db2020-09-14 13:42:25 +0200285#define HAVE_GET_KERNEL_NOFAULT
286
287int __noreturn __put_kernel_bad(void);
288
/*
 * Store @val to kernel address @to with the given store instruction
 * (@insn), with exception-table fixup so that a fault yields -EFAULT
 * instead of an oops.  Evaluates to 0 on success or -EFAULT.
 */
#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0: " insn " %2,%1\n"					\
		"1: xr %0,%0\n"						\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3: lhi %0,%3\n"					\
		" jg 2b\n"						\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+Q" (*(to))				\
		: "d" (val), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})
307
/*
 * Write sizeof(type) bytes of *src to kernel address @dst, branching to
 * @err_label on fault instead of oopsing.  The value is widened to u64
 * and the size-matched store instruction (stc/sth/st/stg) picks the low
 * bytes.  Unsupported sizes cause a link-time error via
 * __put_kernel_bad().
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	u64 __x = (u64)(*((type *)(src)));				\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
333
334int __noreturn __get_kernel_bad(void);
335
/*
 * Load from kernel address @from into @val with the given load
 * instruction (@insn), with exception-table fixup so that a fault
 * yields -EFAULT instead of an oops.  Evaluates to 0 on success or
 * -EFAULT.
 */
#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0: " insn " %1,%2\n"					\
		"1: xr %0,%0\n"						\
		"2:\n"							\
		".pushsection .fixup, \"ax\"\n"				\
		"3: lhi %0,%3\n"					\
		" jg 2b\n"						\
		".popsection\n"						\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)				\
		: "=d" (__rc), "+d" (val)				\
		: "Q" (*(from)), "K" (-EFAULT)				\
		: "cc");						\
	__rc;								\
})
354
/*
 * Read sizeof(type) bytes from kernel address @src into @dst, branching
 * to @err_label on fault instead of oopsing.  Each size reads into a
 * zero-initialized temporary of matching width (via ic/lh/l/lg) before
 * casting back to the target type.  Unsupported sizes cause a link-time
 * error via __get_kernel_bad().
 *
 * Fix: removed the spurious null-statement semicolons that followed the
 * brace-closed case blocks ("};") — harmless, but a checkpatch-flagged
 * style defect.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		u8 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 2: {							\
		u16 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 4: {							\
		u32 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 8: {							\
		u64 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
395
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396#endif /* __S390_UACCESS_H */