/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <asm/uaccess.h>

/*
 * Zero Userspace
 */

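/*
 * Zero @size bytes of user memory at @addr.  The caller is expected to
 * have validated the range with access_ok(); clear_user() below is the
 * checking variant.  Returns the number of bytes that could not be
 * cleared (0 on success).
 */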
unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
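	/*
	 * Clear full quadwords first (%rcx holds size/8, %rdi the
	 * destination), then the remaining size%8 bytes.  On a fault the
	 * exception-table fixups leave the number of bytes not cleared in
	 * %rcx, which is what ends up being returned.
	 */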
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq %[zero],(%[dst])\n"
		"	addq   %[eight],%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   %b[zero],(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

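/*
 * Checking variant of __clear_user(): validates the destination range
 * with access_ok() first.  Returns the number of bytes that could not
 * be cleared; if the access check fails, nothing is zeroed and @n is
 * returned.
 */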
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

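/*
 * Copy @len bytes from one user-space buffer to another.  Both ranges
 * are checked with access_ok(); if either check fails, nothing is
 * copied and @len is returned.  Otherwise returns the number of bytes
 * that copy_user_generic() could not copy.
 */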
unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
	if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
		return copy_user_generic((__force void *)to, (__force void *)from, len);
	}
	return len;
}
EXPORT_SYMBOL(copy_in_user);

/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
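	/*
	 * Copy the remaining bytes one at a time with the unchecked
	 * accessors, stopping at the first byte that faults on either
	 * side.
	 */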
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
	clac();

	/* If the destination is a kernel buffer, we always clear the end */
	if (!__addr_ok(to))
		memset(to, 0, len);
	return len;
}