/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

#define uaccess_kernel()	(get_fs() == KERNEL_DS)
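
/*
 * Illustrative sketch only (hypothetical caller, not used by this header):
 * before the set_fs() interface was removed, code that needed to pass a
 * kernel buffer through a user-pointer API would temporarily widen the
 * address limit around the access:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = some_uaccess_based_helper(kbuf, len);	// hypothetical helper
 *	set_fs(old_fs);
 */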

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}

#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs
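
/*
 * Illustrative sketch (hypothetical caller): a syscall or ioctl handler
 * typically validates the whole user range once up front:
 *
 *	if (!access_ok(uptr, size))
 *		return -EFAULT;
 *
 * and only then uses the "__xxx" accessors or raw copy helpers defined
 * later in this header on that range.
 */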

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable_privileged(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable_privileged(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit. In case the pointer is tagged (has the top byte set),
 * untag the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}
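
/*
 * Illustrative sketch (hypothetical caller): the pointer is sanitised only
 * after the access_ok() check has passed, e.g.:
 *
 *	if (access_ok(uptr, size)) {
 *		uptr = uaccess_mask_ptr(uptr);
 *		... perform the access ...
 *	}
 *
 * so that a mispredicted access_ok() cannot forward an out-of-range pointer
 * to the access under speculation. The accessors below follow this pattern.
 */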

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_mem_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_mem(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_mem_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			      (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_mem_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			      (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_mem_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			      (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_mem_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			      (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __raw_get_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	__raw_get_mem(x, ptr, err);					\
	uaccess_disable_not_uao();					\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
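
/*
 * Illustrative sketch of typical use (hypothetical caller):
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 *
 * On arm64 get_user() performs the access_ok() check and pointer masking
 * itself; __get_user() is aliased to the same checked implementation here.
 */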

#define __put_mem_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_mem(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_mem_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			      (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_mem_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			      (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_mem_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			      (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_mem_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			      (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
} while (0)

#define __raw_put_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	__raw_put_mem(x, ptr, err);					\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
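
/*
 * Illustrative sketch of typical use (hypothetical caller):
 *
 *	if (put_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 *
 * As with get_user(), the pointer is checked with access_ok() and sanitised
 * with uaccess_mask_ptr() before the store is attempted.
 */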

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_enable_not_uao();					\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_disable_not_uao();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_enable_not_uao();					\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				      (from), (n));			\
	uaccess_disable_not_uao();					\
	__actu_ret;							\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	unsigned long __aciu_ret;					\
	uaccess_enable_not_uao();					\
	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_disable_not_uao();					\
	__aciu_ret;							\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_enable_not_uao();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_disable_not_uao();
	}
	return n;
}
#define clear_user	__clear_user
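
/*
 * Illustrative sketch (hypothetical caller): the generic copy_{to,from}_user()
 * wrappers built on the raw_copy_*() helpers above return the number of bytes
 * that could NOT be copied, so the usual calling pattern is:
 *
 *	if (copy_from_user(&kbuf, uptr, sizeof(kbuf)))
 *		return -EFAULT;
 */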

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif	/* __ASM_UACCESS_H */