/*
 * User address space access functions.
 * The non-inlined parts of asm-cris/uaccess.h are here.
 *
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 */

#include <linux/uaccess.h>

/* Asm:s have been tweaked (within the domain of correctness) to give
   satisfactory results for "gcc version 2.96 20000427 (experimental)".

   Check regularly...

   Note that the PC saved at a bus-fault is the address *after* the
   faulting instruction, which means the branch-target for instructions in
   delay-slots for taken branches. Note also that the postincrement in
   the instruction is performed regardless of bus-fault; the register is
   seen updated in fault handlers.

   Oh, and on the code formatting issue, to whoever feels like "fixing
   it" to Conformity: I'm too "lazy", but why don't you go ahead and "fix"
   string.c too. I just don't think too many people will hack this file
   for the code format to be an issue. */


/* Copy to userspace. This is based on the memcpy used for
   kernel-to-kernel copying; see "string.c". */

unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
{
  /* We want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this.
     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).

     FIXME: Comment for old gcc version. Check.
     If gcc was alright, it really would need no temporaries, and no
     stack space to save stuff on. */

  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;


  /* When src is aligned but not dst, this makes a few extra needless
     cycles. I believe it would take as many to check that the
     re-alignment was unnecessary. */
  if (((unsigned long) dst & 3) != 0
      /* Don't align if we wouldn't copy more than a few bytes; so we
         don't have to check further for overflows. */
      && n >= 3)
  {
    if ((unsigned long) dst & 1)
    {
      __asm_copy_to_user_1 (dst, src, retn);
      n--;
    }

    if ((unsigned long) dst & 2)
    {
      __asm_copy_to_user_2 (dst, src, retn);
      n -= 2;
    }
  }

  /* Decide which copying method to use. */
  if (n >= 44*2)                /* Break even between movem and
                                   move16 is at 38.7*2, but modulo 44. */
  {
    /* For large copies we use 'movem'. */

    /* It is not optimal to tell the compiler about clobbering any
       registers; that will move the saving/restoring of those registers
       to the function prologue/epilogue, and make non-movem sizes
       suboptimal.

       This method is not foolproof; it assumes that the "asm reg"
       declarations at the beginning of the function really are used
       here (beware: they may be moved to temporary registers).
       This way, we do not have to save/move the registers around into
       temporaries; we can safely use them straight away.

       If you want to check that the allocation was right, then
       check the equalities in the first comment. It should say
       "r13=r13, r11=r11, r12=r12". */
    __asm__ volatile ("\
        .ifnc %0%1%2%3,$r13$r11$r12$r10 \n\
        .err \n\
        .endif \n\
        \n\
        ;; Save the registers we'll use in the movem process \n\
        ;; on the stack. \n\
        subq 11*4,$sp \n\
        movem $r10,[$sp] \n\
        \n\
        ;; Now we've got this: \n\
        ;; r11 - src \n\
        ;; r13 - dst \n\
        ;; r12 - n \n\
        \n\
        ;; Update n for the first loop \n\
        subq 44,$r12 \n\
        \n\
; Since the noted PC of a faulting instruction in a delay-slot of a taken \n\
; branch is that of the branch target, we actually point at the from-movem \n\
; for this case. There is no ambiguity here; if there was a fault in that \n\
; instruction (meaning a kernel oops), the faulted PC would be the address \n\
; after *that* movem. \n\
        \n\
0: \n\
        movem [$r11+],$r10 \n\
        subq 44,$r12 \n\
        bge 0b \n\
        movem $r10,[$r13+] \n\
1: \n\
        addq 44,$r12 ;; compensate for last loop underflowing n \n\
        \n\
        ;; Restore registers from stack \n\
        movem [$sp+],$r10 \n\
2: \n\
        .section .fixup,\"ax\" \n\
        \n\
; To provide a correct count in r10 of bytes that failed to be copied, \n\
; we jump back into the loop if the loop-branch was taken. There is no \n\
; performance penalty for any use; the program will segfault soon enough. \n\
        \n\
3: \n\
        move.d [$sp],$r10 \n\
        addq 44,$r10 \n\
        move.d $r10,[$sp] \n\
        jump 0b \n\
4: \n\
        movem [$sp+],$r10 \n\
        addq 44,$r10 \n\
        addq 44,$r12 \n\
        jump 2b \n\
        \n\
        .previous \n\
        .section __ex_table,\"a\" \n\
        .dword 0b,3b \n\
        .dword 1b,4b \n\
        .previous"

        /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
        /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));

  }

  /* Either we directly start copying, using dword copying in a loop, or
     we copy as much as possible with 'movem' and then the last block (<44
     bytes) is copied here. This will work since 'movem' will have
     updated SRC, DST and N. */

  while (n >= 16)
  {
    __asm_copy_to_user_16 (dst, src, retn);
    n -= 16;
  }

  /* Having a separate by-four loop cuts down on cache footprint.
     FIXME: Test with and without; increasing switch to be 0..15. */
  while (n >= 4)
  {
    __asm_copy_to_user_4 (dst, src, retn);
    n -= 4;
  }

  switch (n)
  {
    case 0:
      break;
    case 1:
      __asm_copy_to_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_to_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_to_user_3 (dst, src, retn);
      break;
  }

  return retn;
}
EXPORT_SYMBOL(__copy_user);
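
/* Illustrative only (not part of the original file): a minimal caller-side
   sketch of the contract above. __copy_user() returns the number of bytes
   that could NOT be copied, so wrappers such as copy_to_user() yield 0 on
   success and the remaining count on a fault. The helper name and buffer
   below are hypothetical, and the block is compiled out. */
#if 0
static int example_put_buf(void __user *uptr, const char *kbuf,
                           unsigned long len)
{
  /* copy_to_user() returns 0 if everything was written, otherwise the
     length of the tail that could not be written. */
  unsigned long left = copy_to_user(uptr, kbuf, len);

  return left ? -EFAULT : 0;
}
#endif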

/* Copy from user to kernel. The return value is the number of bytes that were
   inaccessible. */

unsigned long __copy_user_in(void *pdst, const void __user *psrc,
                             unsigned long pn)
{
  /* We want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this.
     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).

     FIXME: Comment for old gcc version. Check.
     If gcc was alright, it really would need no temporaries, and no
     stack space to save stuff on. */

  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;

  /* The best reason to align src is that we then know that a read-fault
     was for aligned bytes; there are no 1..3 remaining good bytes to
     pickle. */
  if (((unsigned long) src & 3) != 0)
  {
    if (((unsigned long) src & 1) && n != 0)
    {
      __asm_copy_from_user_1 (dst, src, retn);
      n--;
      if (retn)
        goto exception;
    }

    if (((unsigned long) src & 2) && n >= 2)
    {
      __asm_copy_from_user_2 (dst, src, retn);
      n -= 2;
      if (retn)
        goto exception;
    }
  }

  /* Decide which copying method to use. */
  if (n >= 44*2)                /* Break even between movem and
                                   move16 is at 38.7*2, but modulo 44.
                                   FIXME: We use move4 now. */
  {
    /* For large copies we use 'movem'. */

    /* It is not optimal to tell the compiler about clobbering any
       registers; that will move the saving/restoring of those registers
       to the function prologue/epilogue, and make non-movem sizes
       suboptimal.

       This method is not foolproof; it assumes that the "asm reg"
       declarations at the beginning of the function really are used
       here (beware: they may be moved to temporary registers).
       This way, we do not have to save/move the registers around into
       temporaries; we can safely use them straight away.

       If you want to check that the allocation was right, then
       check the equalities in the first comment. It should say
       "r13=r13, r11=r11, r12=r12". */
    __asm__ volatile ("\n\
        .ifnc %0%1%2%3,$r13$r11$r12$r10 \n\
        .err \n\
        .endif \n\
        \n\
        ;; Save the registers we'll use in the movem process \n\
        ;; on the stack. \n\
        subq 11*4,$sp \n\
        movem $r10,[$sp] \n\
        \n\
        ;; Now we've got this: \n\
        ;; r11 - src \n\
        ;; r13 - dst \n\
        ;; r12 - n \n\
        \n\
        ;; Update n for the first loop \n\
        subq 44,$r12 \n\
0: \n\
        movem [$r11+],$r10 \n\
1: \n\
        subq 44,$r12 \n\
        bge 0b \n\
        movem $r10,[$r13+] \n\
        \n\
        addq 44,$r12 ;; compensate for last loop underflowing n \n\
        \n\
        ;; Restore registers from stack \n\
        movem [$sp+],$r10 \n\
4: \n\
        .section .fixup,\"ax\" \n\
        \n\
;; Do not jump back into the loop if we fail. For some uses, we get a \n\
;; page fault somewhere on the line. Without checking for page limits, \n\
;; we don't know where, but we need to copy accurately and keep an \n\
;; accurate count; not just clear the whole line. To do that, we fall \n\
;; down in the code below, proceeding with smaller amounts. It should \n\
;; be kept in mind that we have to cater to code like what at one time \n\
;; was in fs/super.c: \n\
;;  i = size - copy_from_user((void *)page, data, size); \n\
;; which would cause repeated faults while clearing the remainder of \n\
;; the SIZE bytes at PAGE after the first fault. \n\
;; A caveat here is that we must not fall through from a failing page \n\
;; to a valid page. \n\
        \n\
3: \n\
        movem [$sp+],$r10 \n\
        addq 44,$r12 ;; Get back count before faulting point. \n\
        subq 44,$r11 ;; Get back pointer to faulting movem-line. \n\
        jump 4b ;; Fall through, pretending the fault didn't happen. \n\
        \n\
        .previous \n\
        .section __ex_table,\"a\" \n\
        .dword 1b,3b \n\
        .previous"

        /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
        /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));

  }

  /* Either we directly start copying here, using dword copying in a loop,
     or we copy as much as possible with 'movem' and then the last block
     (<44 bytes) is copied here. This will work since 'movem' will have
     updated src, dst and n. (Except with failing src.)

     Since we want to keep src accurate, we can't use
     __asm_copy_from_user_N with N != (1, 2, 4); it updates dst and
     retn, but not src (by design; its value is ignored elsewhere). */

  while (n >= 4)
  {
    __asm_copy_from_user_4 (dst, src, retn);
    n -= 4;

    if (retn)
      goto exception;
  }

  /* If we get here, there were no memory read faults. */
  switch (n)
  {
    /* These copies are at least "naturally aligned" (so we don't have
       to check each byte), due to the src alignment code before the
       movem loop. The *_3 case *will* get the correct count for retn. */
    case 0:
      /* This case deliberately left in (if you have doubts, check the
         generated assembly code). */
      break;
    case 1:
      __asm_copy_from_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_from_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_from_user_3 (dst, src, retn);
      break;
  }

  /* If we get here, retn correctly reflects the number of failing
     bytes. */
  return retn;

exception:
  return retn + n;
}
EXPORT_SYMBOL(__copy_user_in);
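
/* Illustrative only (not part of the original file): the kind of caller the
   movem fixup above must keep working. copy_from_user() (typically built on
   __copy_user_in plus zeroing of any uncopied tail) returns the number of
   bytes left uncopied, so a caller can compute how much actually arrived,
   as in the old fs/super.c idiom quoted in the asm comment:
     i = size - copy_from_user((void *)page, data, size);
   The helper below is hypothetical and the block is compiled out. */
#if 0
static long example_get_buf(char *kbuf, const char __user *data,
                            unsigned long size)
{
  /* not_copied is 0 on full success, otherwise the length of the tail
     that could not be read from user space. */
  unsigned long not_copied = copy_from_user(kbuf, data, size);

  return size - not_copied;   /* bytes actually copied */
}
#endif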

/* Zero userspace. */
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
  /* We want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this.
     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).

     FIXME: Comment for old gcc version. Check.
     If gcc was alright, it really would need no temporaries, and no
     stack space to save stuff on. */

  register char *dst __asm__ ("r13") = pto;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;


  if (((unsigned long) dst & 3) != 0
      /* Don't align if we wouldn't copy more than a few bytes. */
      && n >= 3)
  {
    if ((unsigned long) dst & 1)
    {
      __asm_clear_1 (dst, retn);
      n--;
    }

    if ((unsigned long) dst & 2)
    {
      __asm_clear_2 (dst, retn);
      n -= 2;
    }
  }

  /* Decide which copying method to use.
     FIXME: This number is from the "ordinary" kernel memset. */
  if (n >= (1*48))
  {
    /* For large clears we use 'movem'. */

    /* It is not optimal to tell the compiler about clobbering any
       call-saved registers; that will move the saving/restoring of
       those registers to the function prologue/epilogue, and make
       non-movem sizes suboptimal.

       This method is not foolproof; it assumes that the "asm reg"
       declarations at the beginning of the function really are used
       here (beware: they may be moved to temporary registers).
       This way, we do not have to save/move the registers around into
       temporaries; we can safely use them straight away.

       If you want to check that the allocation was right, then
       check the equalities in the first comment. It should say
       something like "r13=r13, r11=r11, r12=r12". */
    __asm__ volatile ("\n\
        .ifnc %0%1%2,$r13$r12$r10 \n\
        .err \n\
        .endif \n\
        \n\
        ;; Save the registers we'll clobber in the movem process \n\
        ;; on the stack. Don't mention them to gcc, it will only be \n\
        ;; upset. \n\
        subq 11*4,$sp \n\
        movem $r10,[$sp] \n\
        \n\
        clear.d $r0 \n\
        clear.d $r1 \n\
        clear.d $r2 \n\
        clear.d $r3 \n\
        clear.d $r4 \n\
        clear.d $r5 \n\
        clear.d $r6 \n\
        clear.d $r7 \n\
        clear.d $r8 \n\
        clear.d $r9 \n\
        clear.d $r10 \n\
        clear.d $r11 \n\
        \n\
        ;; Now we've got this: \n\
        ;; r13 - dst \n\
        ;; r12 - n \n\
        \n\
        ;; Update n for the first loop \n\
        subq 12*4,$r12 \n\
0: \n\
        subq 12*4,$r12 \n\
        bge 0b \n\
        movem $r11,[$r13+] \n\
1: \n\
        addq 12*4,$r12 ;; compensate for last loop underflowing n \n\
        \n\
        ;; Restore registers from stack \n\
        movem [$sp+],$r10 \n\
2: \n\
        .section .fixup,\"ax\" \n\
3: \n\
        move.d [$sp],$r10 \n\
        addq 12*4,$r10 \n\
        move.d $r10,[$sp] \n\
        clear.d $r10 \n\
        jump 0b \n\
        \n\
4: \n\
        movem [$sp+],$r10 \n\
        addq 12*4,$r10 \n\
        addq 12*4,$r12 \n\
        jump 2b \n\
        \n\
        .previous \n\
        .section __ex_table,\"a\" \n\
        .dword 0b,3b \n\
        .dword 1b,4b \n\
        .previous"

        /* Outputs */ : "=r" (dst), "=r" (n), "=r" (retn)
        /* Inputs */ : "0" (dst), "1" (n), "2" (retn)
        /* Clobber */ : "r11");
  }

  while (n >= 16)
  {
    __asm_clear_16 (dst, retn);
    n -= 16;
  }

  /* Having a separate by-four loop cuts down on cache footprint.
     FIXME: Test with and without; increasing switch to be 0..15. */
  while (n >= 4)
  {
    __asm_clear_4 (dst, retn);
    n -= 4;
  }

  switch (n)
  {
    case 0:
      break;
    case 1:
      __asm_clear_1 (dst, retn);
      break;
    case 2:
      __asm_clear_2 (dst, retn);
      break;
    case 3:
      __asm_clear_3 (dst, retn);
      break;
  }

  return retn;
}
EXPORT_SYMBOL(__do_clear_user);
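
/* Illustrative only (not part of the original file): __do_clear_user() backs
   clear_user(), which zeroes a user-space range and returns the number of
   bytes that could not be cleared. A hypothetical caller padding the unused
   tail of a user buffer might look like the sketch below; the block is
   compiled out. */
#if 0
static int example_pad_tail(char __user *buf, unsigned long used,
                            unsigned long total)
{
  /* Zero everything past the bytes already written. */
  if (used < total && clear_user(buf + used, total - used) != 0)
    return -EFAULT;

  return 0;
}
#endif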