#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <asm/segment.h>
#include <asm/extable.h>

#define __addr_ok(addr) \
	((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size; carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
#define __access_ok(addr, size)	\
	(__addr_ok((addr) + (size)))
#define access_ok(type, addr, size)	\
	(__chk_user_ptr(addr),		\
	 __access_ok((unsigned long __force)(addr), (size)))
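
/*
 * Illustration (not part of the original header): a typical caller
 * validates the whole user range once with access_ok() before touching
 * it.  With a hypothetical user pointer 'ubuf' and length 'len':
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	... the range may now be used with the unchecked helpers ...
 */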

#define user_addr_max()	(current_thread_info()->addr_limit.seg)

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)		__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x,ptr)		__get_user_check((x), (ptr), sizeof(*(ptr)))
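
/*
 * Illustration (not part of the original header): get_user()/put_user()
 * return 0 on success and -EFAULT on a faulting access, so a caller
 * with a hypothetical 'int __user *uptr' might do:
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */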

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x,ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
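
/*
 * Illustration (not part of the original header): one access_ok() check
 * followed by several unchecked accesses to the same area.  'uarg' is a
 * hypothetical pointer to a user-space struct with fields 'a' and 'b':
 *
 *	if (!access_ok(VERIFY_READ, uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(a, &uarg->a) || __get_user(b, &uarg->b))
 *		return -EFAULT;
 */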

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (likely(access_ok(VERIFY_READ, __gu_addr, (size))))		\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

#define __put_user_nocheck(x,ptr,size)				\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = x;			\
	__chk_user_ptr(ptr);					\
	__put_user_size(__pu_val, __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = x;			\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size)))	\
		__put_user_size(__pu_val, __pu_addr, (size),	\
				__pu_err);			\
	__pu_err;						\
})

#ifdef CONFIG_SUPERH32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

/* Generic arbitrary sized copy. */
/* Return the number of bytes NOT copied */
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user(to, (__force void *)from, n);
}

static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user((__force void *)to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/*
 * Clear the area and return the number of bytes NOT cleared
 * (nonzero only on failure; usually 0).
 */
__kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n)					\
({								\
	void __user * __cl_addr = (addr);			\
	unsigned long __cl_size = (n);				\
								\
	if (__cl_size && access_ok(VERIFY_WRITE,		\
		((unsigned long)(__cl_addr)), __cl_size))	\
		__cl_size = __clear_user(__cl_addr, __cl_size);	\
								\
	__cl_size;						\
})
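
/*
 * Illustration (not part of the original header): clear_user() also
 * returns the number of bytes left unwritten.  Zero-padding the tail of
 * a hypothetical user buffer 'ubuf' of size 'len', of which 'copied'
 * bytes have already been filled in:
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */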

static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long __copy_from = (unsigned long) from;
	__kernel_size_t __copy_size = (__kernel_size_t) n;

	if (__copy_size && __access_ok(__copy_from, __copy_size))
		__copy_size = __copy_user(to, from, __copy_size);

	if (unlikely(__copy_size))
		memset(to + (n - __copy_size), 0, __copy_size);

	return __copy_size;
}

static inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long __copy_to = (unsigned long) to;
	__kernel_size_t __copy_size = (__kernel_size_t) n;

	if (__copy_size && __access_ok(__copy_to, __copy_size))
		return __copy_user(to, from, __copy_size);

	return __copy_size;
}
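
/*
 * Illustration (not part of the original header): copy_from_user() and
 * copy_to_user() return the number of bytes that could NOT be copied,
 * so 0 means success.  With a hypothetical kernel buffer 'kbuf' and
 * user pointer 'ubuf':
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */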

extern void *set_exception_table_vec(unsigned int vec, void *handler);
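
/*
 * Note (added for clarity, not in the original header): SH exception
 * event codes (EXPEVT/INTEVT) are spaced 0x20 apart, so shifting an
 * event code right by 5 yields its exception table vector number.
 */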
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	return set_exception_table_vec(evt >> 5, handler);
}

struct mem_access {
	unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
	unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
};

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int, unsigned long address);

#endif /* __ASM_SH_UACCESS_H */