/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

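/*
 * A minimal usage sketch (not part of this file): from C these entry
 * points are normally reached through the uaccess wrappers, e.g.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;		// non-zero return = bytes left uncopied
 */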
#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_to_user
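	/*
	 * The jump below is patched at boot by the alternatives code:
	 * the unrolled copy is the default, CPUs with REP_GOOD get the
	 * string copy and CPUs with ERMS get the "rep movsb" variant.
	 */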
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
ENDPROC(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
ENDPROC(_copy_from_user)

	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
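	/*
	 * bad_from_user zeroes the whole destination above and then
	 * falls through; bad_to_user just reports the failure.  Both
	 * return the full byte count in %eax.
	 */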
bad_to_user:
	movl %edx,%eax
	ret
ENDPROC(bad_from_user)
	.previous

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like the P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_unrolled)
	ASM_STAC
	cmpl $8,%edx
	jb 20f	/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
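	/* Set the 64-byte block count in %ecx and the remainder in %edx */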
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
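	/* Main loop: copy 64 bytes per iteration through registers */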
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
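	/* Copy the remaining 8-byte words */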
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
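	/* Copy the trailing bytes one at a time */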
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
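	/*
	 * Fault recovery: recompute the number of bytes still to be
	 * copied into %edx and hand off to copy_user_handle_tail(),
	 * which finishes the copy byte by byte.
	 * 30: fault in the 64-byte loop (%ecx = 64-byte blocks left)
	 * 40: fault in the 8-byte loop  (%ecx = 8-byte words left)
	 * 50: fault in the byte loop    (%ecx = bytes left)
	 */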
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page-sized chunks
 * even if user space passed a longer buffer.
 * Copying more would also be dangerous because both Intel and AMD
 * have errata for rep movsq > 4GB; anyone lifting this limit should
 * keep that in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_string)
	ASM_STAC
	cmpl $8,%edx
	jb 2f	/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
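	/* %ecx = 8-byte words for "rep movsq", %edx = trailing bytes for "rep movsb" */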
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
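	/*
	 * 11: fault during "rep movsq": %ecx words plus %edx trailing
	 *     bytes are still to be copied.
	 * 12: fault during "rep movsb": %ecx bytes remain.
	 */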
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)

/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use this variant when the feature is available.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_enhanced_fast_string)
	ASM_STAC
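	/* The count goes in %ecx; a single "rep movsb" does the whole copy */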
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)

/*
 * __copy_user_nocache - Uncached memory copy with exception handling
 * The destination is written with non-temporal stores so that the
 * copy does not pollute the cache.
 *
 * Note: a cached (normal) copy is used whenever the destination or
 * the size is not naturally aligned. That is:
 *  - 8-byte alignment is required when the size is 8 bytes or larger.
 */
ENTRY(__copy_user_nocache)
	ASM_STAC

	/* If size is less than 8 bytes, go to byte copy */
	cmpl $8,%edx
	jb .L_1b_cache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */
	ALIGN_DESTINATION

	/* Set 4x8-byte copy count and remainder */
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 4x8-byte nocache loop-copy */
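	/* (movnti stores bypass the cache; the sfence at the end orders them) */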
.L_4x8b_nocache_copy_loop:
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movnti %r8,(%rdi)
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz .L_4x8b_nocache_copy_loop

	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */

	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20:	movq (%rsi),%r8
21:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz .L_8b_nocache_copy_loop

	/* If no bytes are left, we're done */
.L_1b_cache_copy_entry:
	andl %edx,%edx
	jz .L_finish_copy

	/* Perform byte "cache" loop-copy for the remainder */
	movl %edx,%ecx
.L_1b_cache_copy_loop:
40:	movb (%rsi),%al
41:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_1b_cache_copy_loop

	/* Finished copying; fence the prior stores */
.L_finish_copy:
	xorl %eax,%eax
	ASM_CLAC
	sfence
	ret

	.section .fixup,"ax"
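	/*
	 * Fault recovery: same arithmetic as in the unrolled variant to
	 * recompute the remaining byte count in %edx, plus an sfence to
	 * order the movnti stores already issued, before handing off to
	 * copy_user_handle_tail().
	 */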
.L_fixup_4x8b_copy:
	shll $6,%ecx
	addl %ecx,%edx
	jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
	movl %ecx,%edx
.L_fixup_handle_tail:
	sfence
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)