/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
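/*
 * Illustrative sketch (not part of this file): callers normally reach the
 * three variants below through copy_user_generic(), which is expected to
 * pick the best implementation for the running CPU, roughly:
 *
 *	unsigned long left = copy_user_generic(dst, src, len);
 *	if (left)
 *		... 'left' trailing bytes were not copied ...
 *
 * The actual dispatch (e.g. X86_FEATURE_REP_GOOD / X86_FEATURE_ERMS
 * alternatives) lives in the uaccess headers, not here.
 */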
ENTRY(copy_user_generic_unrolled)
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz .L_copy_short_string
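	/* Perform 4x8-byte loop-copy: 64 bytes per iteration */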
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
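	/* Set 8-byte copy count and remainder */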
.L_copy_short_string:
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
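	/* Perform 8-byte loop-copy */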
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
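	/* Perform byte loop-copy for the 1..7 remaining bytes */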
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
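	/*
	 * Exception fixups: recompute the number of bytes still to copy in
	 * %edx from the interrupted loop's counter, then hand off to
	 * copy_user_handle_tail(), which returns the uncopied byte count.
	 */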
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * Copying more would also be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to lift this
 * limit, please consider those errata first.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
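/*
 * Worked example: for a count of 20 bytes, %ecx = 20 >> 3 = 2 qwords for
 * 'rep movsq' and %edx = 20 & 7 = 4 trailing bytes for 'rep movsb'.
 */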
ENTRY(copy_user_generic_string)
	ASM_STAC
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
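	/*
	 * Fault fixups: 11 handles a fault during 'rep movsq' (%rcx remaining
	 * qwords plus %edx odd bytes), 12 a fault during 'rep movsb' (%ecx
	 * remaining bytes); both end up in copy_user_handle_tail().
	 */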
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)

/*
 * Some CPUs support the enhanced REP MOVSB/STOSB (ERMS) instructions.
 * It's recommended to use them for copies when the feature is enabled.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
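/*
 * Note: lengths below 64 bytes are routed to .L_copy_short_string above,
 * since the fixed startup cost of 'rep movsb' dominates for short copies.
 */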
ENTRY(copy_user_enhanced_fast_string)
	ASM_STAC
	cmpl $64,%edx
	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)

196/*
197 * copy_user_nocache - Uncached memory copy with exception handling
Toshi Kaniee9737c2016-02-11 14:24:16 -0700198 * This will force destination out of cache for more performance.
199 *
200 * Note: Cached memory copy is used when destination or size is not
201 * naturally aligned. That is:
202 * - Require 8-byte alignment when size is 8 bytes or larger.
Toshi Kania82eee72016-02-11 14:24:17 -0700203 * - Require 4-byte alignment when size is 4 bytes.
Borislav Petkovb41e6ec2015-05-13 19:42:24 +0200204 */
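/*
 * Worked example (illustrative): a 20-byte copy to an 8-byte-aligned
 * destination is done as two 8-byte movnti stores plus one 4-byte movnti
 * store; an unaligned head or a sub-4-byte tail is copied with ordinary
 * cached stores instead.
 */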
ENTRY(__copy_user_nocache)
	ASM_STAC

	/* If size is less than 8 bytes, go to 4-byte copy */
	cmpl $8,%edx
	jb .L_4b_nocache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */
	ALIGN_DESTINATION

	/* Set 4x8-byte copy count and remainder */
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movnti %r8,(%rdi)
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz .L_4x8b_nocache_copy_loop

	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20:	movq (%rsi),%r8
21:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz .L_8b_nocache_copy_loop

	/* If no byte left, we're done */
.L_4b_nocache_copy_entry:
	andl %edx,%edx
	jz .L_finish_copy

	/* If destination is not 4-byte aligned, go to byte copy: */
	movl %edi,%ecx
	andl $3,%ecx
	jnz .L_1b_cache_copy_entry

	/* Set 4-byte copy count (1 or 0) and remainder */
	movl %edx,%ecx
	andl $3,%edx
	shrl $2,%ecx
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */

	/* Perform 4-byte nocache copy: */
30:	movl (%rsi),%r8d
31:	movnti %r8d,(%rdi)
	leaq 4(%rsi),%rsi
	leaq 4(%rdi),%rdi

	/* If no bytes left, we're done: */
	andl %edx,%edx
	jz .L_finish_copy

	/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
	movl %edx,%ecx
.L_1b_cache_copy_loop:
40:	movb (%rsi),%al
41:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_1b_cache_copy_loop

	/* Finished copying; fence the prior stores */
.L_finish_copy:
	xorl %eax,%eax
	ASM_CLAC
	sfence
	ret

	.section .fixup,"ax"
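	/*
	 * Exception fixups: recompute the number of bytes still to copy from
	 * the interrupted loop's counter, fence the non-temporal stores
	 * already issued, and let copy_user_handle_tail() deal with the rest.
	 */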
.L_fixup_4x8b_copy:
	shll $6,%ecx
	addl %ecx,%edx
	jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_4b_copy:
	lea (%rdx,%rcx,4),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
	movl %ecx,%edx
.L_fixup_handle_tail:
	sfence
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
	_ASM_EXTABLE(30b,.L_fixup_4b_copy)
	_ASM_EXTABLE(31b,.L_fixup_4b_copy)
	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)