/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
#include <asm/trapnr.h>

.macro ALIGN_DESTINATION
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp .Lcopy_user_handle_tail
	.previous

	_ASM_EXTABLE_CPY(100b, 103b)
	_ASM_EXTABLE_CPY(101b, 103b)
	.endm
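/*
 * For reference, ALIGN_DESTINATION behaves roughly like the C sketch
 * below (illustration only, the name is made up): it byte-copies just
 * enough to bring %rdi up to an 8-byte boundary and deducts those bytes
 * from the count in %edx; a fault in the byte loop jumps to
 * .Lcopy_user_handle_tail with %edx restored to the number of bytes not
 * yet copied.
 *
 *	static void align_destination(char **dst, const char **src,
 *				      unsigned long *len)
 *	{
 *		unsigned long head = (unsigned long)*dst & 7;
 *
 *		if (!head)
 *			return;
 *		head = 8 - head;	// bytes needed to reach alignment
 *		*len -= head;		// subl %ecx,%edx
 *		while (head--)		// 100:/101: byte copy loop
 *			*(*dst)++ = *(*src)++;
 *	}
 */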

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient microcode
 * for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
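/*
 * Structure of the routine below, as a rough C sketch (illustration
 * only, the name is made up): after aligning the destination, copy
 * 64-byte blocks through eight registers, then 8-byte words, then the
 * remaining bytes.
 *
 *	unsigned long copy_unrolled_sketch(char *dst, const char *src,
 *					   unsigned long len)
 *	{
 *		while (len >= 64) {	// labels 1-16: 4x8 + 4x8 movq
 *			for (int i = 0; i < 8; i++)
 *				((unsigned long *)dst)[i] =
 *					((const unsigned long *)src)[i];
 *			dst += 64; src += 64; len -= 64;
 *		}
 *		while (len >= 8) {	// labels 18/19: 8-byte loop
 *			*(unsigned long *)dst = *(const unsigned long *)src;
 *			dst += 8; src += 8; len -= 8;
 *		}
 *		while (len--)		// labels 21/22: byte tail
 *			*dst++ = *src++;
 *		return 0;		// on a fault: bytes left uncopied
 *	}
 */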
SYM_FUNC_START(copy_user_generic_unrolled)
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz .L_copy_short_string
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
.L_copy_short_string:
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
	.previous

	_ASM_EXTABLE_CPY(1b, 30b)
	_ASM_EXTABLE_CPY(2b, 30b)
	_ASM_EXTABLE_CPY(3b, 30b)
	_ASM_EXTABLE_CPY(4b, 30b)
	_ASM_EXTABLE_CPY(5b, 30b)
	_ASM_EXTABLE_CPY(6b, 30b)
	_ASM_EXTABLE_CPY(7b, 30b)
	_ASM_EXTABLE_CPY(8b, 30b)
	_ASM_EXTABLE_CPY(9b, 30b)
	_ASM_EXTABLE_CPY(10b, 30b)
	_ASM_EXTABLE_CPY(11b, 30b)
	_ASM_EXTABLE_CPY(12b, 30b)
	_ASM_EXTABLE_CPY(13b, 30b)
	_ASM_EXTABLE_CPY(14b, 30b)
	_ASM_EXTABLE_CPY(15b, 30b)
	_ASM_EXTABLE_CPY(16b, 30b)
	_ASM_EXTABLE_CPY(18b, 40b)
	_ASM_EXTABLE_CPY(19b, 40b)
	_ASM_EXTABLE_CPY(21b, 50b)
	_ASM_EXTABLE_CPY(22b, 50b)
SYM_FUNC_END(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page-sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to fix
 * this, please keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
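/*
 * In rough C terms (illustration only), the split below is simply:
 *
 *	words = len >> 3;	// copied with "rep movsq"
 *	tail  = len & 7;	// copied with "rep movsb"
 *
 * i.e. one string move for the 8-byte words and one for the 0..7
 * leftover bytes; ALIGN_DESTINATION is only run for copies of 8 bytes
 * or more.
 */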
SYM_FUNC_START(copy_user_generic_string)
	ASM_STAC
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp .Lcopy_user_handle_tail
	.previous

	_ASM_EXTABLE_CPY(1b, 11b)
	_ASM_EXTABLE_CPY(3b, 12b)
SYM_FUNC_END(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)

/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use enhanced REP MOVSB/STOSB when the feature is
 * available.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
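/*
 * With ERMS a single "rep movsb" is fast enough for the whole copy, so
 * only copies shorter than 64 bytes are diverted to .L_copy_short_string
 * (the word/byte loop above) to avoid the rep startup cost. A rough C
 * sketch of the fast path (illustration only, the name is made up):
 *
 *	static unsigned long erms_copy_sketch(void *dst, const void *src,
 *					      unsigned long len)
 *	{
 *		asm volatile("rep movsb"
 *			     : "+D" (dst), "+S" (src), "+c" (len)
 *			     : : "memory");
 *		return len;	// 0 unless a fault stopped the copy early
 *	}
 */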
SYM_FUNC_START(copy_user_enhanced_fast_string)
	ASM_STAC
	cmpl $64,%edx
	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp .Lcopy_user_handle_tail
	.previous

	_ASM_EXTABLE_CPY(1b, 12b)
SYM_FUNC_END(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)

/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 * Don't try to copy the tail if a machine check happened.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
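/*
 * In effect (rough sketch, not the exact control flow): the exception
 * table fixup arrives here with the trap number in %eax and the number
 * of bytes not yet copied in %edx, and does roughly:
 *
 *	if (trapnr == X86_TRAP_MC)
 *		return 0;	// poisoned data: MCE handler sends SIGBUS
 *	remaining = rep_movsb(dst, src, remaining);	// may fault again
 *	return remaining;	// uncopied byte count, 0 on success
 *
 * (rep_movsb() is only shorthand for the "rep movsb" at label 1: below.)
 */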
SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
	movl %edx,%ecx
	cmp $X86_TRAP_MC,%eax		/* check if X86_TRAP_MC */
	je 3f
1:	rep movsb
2:	mov %ecx,%eax
	ASM_CLAC
	ret

	/*
	 * Return zero to pretend that this copy succeeded. This
	 * is counter-intuitive, but needed to prevent the code
	 * in lib/iov_iter.c from retrying and running back into
	 * the poison cache line again. The machine check handler
	 * will ensure that a SIGBUS is sent to the task.
	 */
3:	xorl %eax,%eax
	ASM_CLAC
	ret

	_ASM_EXTABLE_CPY(1b, 2b)
SYM_CODE_END(.Lcopy_user_handle_tail)

/*
 * copy_user_nocache - Uncached memory copy with exception handling.
 * This will force the destination out of the cache for better performance.
 *
 * Note: Cached memory copy is used when destination or size is not
 * naturally aligned. That is:
 *  - Require 8-byte alignment when size is 8 bytes or larger.
 *  - Require 4-byte alignment when size is 4 bytes.
 */
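/*
 * Overall structure, roughly (illustration only; the helper names below
 * are made up and stand in for the labelled blocks that follow):
 *
 *	align_destination();	// cached byte copy, as in the macro above
 *	copy_4x8b_nocache();	// 64-byte movnti loop
 *	copy_8b_nocache();	// 8-byte movnti loop
 *	copy_4b_nocache();	// single 4-byte movnti, only taken when the
 *				// destination is 4-byte aligned
 *	copy_tail_cached();	// remaining 1-3 bytes with movb
 *	sfence();		// order the weakly-ordered movnti stores
 *				// before returning
 */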
SYM_FUNC_START(__copy_user_nocache)
	ASM_STAC

	/* If size is less than 8 bytes, go to 4-byte copy */
	cmpl $8,%edx
	jb .L_4b_nocache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */
	ALIGN_DESTINATION

	/* Set 4x8-byte copy count and remainder */
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movnti %r8,(%rdi)
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz .L_4x8b_nocache_copy_loop

	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20:	movq (%rsi),%r8
21:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz .L_8b_nocache_copy_loop

	/* If no bytes are left, we're done */
.L_4b_nocache_copy_entry:
	andl %edx,%edx
	jz .L_finish_copy

	/* If destination is not 4-byte aligned, go to byte copy: */
	movl %edi,%ecx
	andl $3,%ecx
	jnz .L_1b_cache_copy_entry

	/* Set 4-byte copy count (1 or 0) and remainder */
	movl %edx,%ecx
	andl $3,%edx
	shrl $2,%ecx
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */

	/* Perform 4-byte nocache copy: */
30:	movl (%rsi),%r8d
31:	movnti %r8d,(%rdi)
	leaq 4(%rsi),%rsi
	leaq 4(%rdi),%rdi

	/* If no bytes left, we're done: */
	andl %edx,%edx
	jz .L_finish_copy

	/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
	movl %edx,%ecx
.L_1b_cache_copy_loop:
40:	movb (%rsi),%al
41:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_1b_cache_copy_loop

	/* Finished copying; fence the prior stores */
.L_finish_copy:
	xorl %eax,%eax
	ASM_CLAC
	sfence
	ret

	.section .fixup,"ax"
.L_fixup_4x8b_copy:
	shll $6,%ecx
	addl %ecx,%edx
	jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_4b_copy:
	lea (%rdx,%rcx,4),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
	movl %ecx,%edx
.L_fixup_handle_tail:
	sfence
	jmp .Lcopy_user_handle_tail
	.previous

	_ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy)
	_ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy)
	_ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy)
	_ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy)
	_ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy)
	_ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy)
SYM_FUNC_END(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)