/* SPDX-License-Identifier: GPL-2.0 */
/* memmove.S: Simple memmove implementation.
 *
 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/linkage.h>
#include <asm/export.h>

	.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
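	/* C-level view of the register annotation above; this is just
	 * the standard prototype, shown for reference:
	 *
	 *	void *memmove(void *dst, const void *src, size_t len);
	 *
	 * Like the C function, we must return the original dst, so it
	 * is saved in %g1 right away.
	 */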
	/* Bail out immediately on a zero length.  The delay slot saves
	 * the original dst in %g1 on both paths; it is the return
	 * value either way.
	 */
	brz,pn		%o2, 99f
	 mov		%o0, %g1

	/* Overlap dispatch: if dst <= src, a forward copy cannot
	 * clobber unread source bytes, but memcpy still cannot be
	 * used (see the comment before 2: below).  If src + len <=
	 * dst, the buffers are disjoint and we tail-call memcpy.
	 * Otherwise dst points into the source buffer, so copy
	 * backwards starting from the last byte of each buffer.
	 */
	cmp		%o0, %o1
	bleu,pt		%xcc, 2f
	 add		%o1, %o2, %g7
	cmp		%g7, %o0
	bleu,pt		%xcc, memcpy
	 add		%o0, %o2, %o5
	sub		%g7, 1, %o1

	sub		%o5, 1, %o0
	/* Copy backwards, one byte at a time, until len hits zero. */
1:	ldub		[%o1], %g7
	subcc		%o2, 1, %o2
	sub		%o1, 1, %o1
	stb		%g7, [%o0]
	bne,pt		%icc, 1b
	 sub		%o0, 1, %o0
99:
	retl
	 mov		%g1, %o0

	/* We can't just call memcpy for these memmove cases.  On some
	 * chips the memcpy uses cache initializing stores and when dst
	 * and src are close enough, those can clobber the source data
	 * before we've loaded it in.
	 */
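	/* A hypothetical illustration of the hazard (the 64-byte line
	 * size is an assumption for the example, not something this
	 * file relies on): with src = buf, dst = buf + 8, len = 64 on
	 * such a chip, the initializing store for dst's first cache
	 * line can zero the whole 64-byte line, wiping out source
	 * bytes of buf that the copy has not loaded yet.  The plain
	 * loads and stores below never overwrite a source byte before
	 * it has been copied, so they are safe for any dst <= src.
	 */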
	/* If dst, src and len are all 8-byte aligned, copy forward a
	 * 64-bit word at a time; otherwise fall through to the
	 * byte-at-a-time forward loop at 4:.
	 */
2:	or		%o0, %o1, %g7
	or		%o2, %g7, %g7
	andcc		%g7, 0x7, %g0
	bne,pn		%xcc, 4f
	 nop

	/* Forward copy, eight bytes per iteration. */
3:	ldx		[%o1], %g7
	add		%o1, 8, %o1
	subcc		%o2, 8, %o2
	add		%o0, 8, %o0
	bne,pt		%icc, 3b
	 stx		%g7, [%o0 - 0x8]
	ba,a,pt		%xcc, 99b

	/* Forward copy, one byte per iteration. */
4:	ldub		[%o1], %g7
	add		%o1, 1, %o1
	subcc		%o2, 1, %o2
	add		%o0, 1, %o0
	bne,pt		%icc, 4b
	 stb		%g7, [%o0 - 0x1]
	ba,a,pt		%xcc, 99b
ENDPROC(memmove)
EXPORT_SYMBOL(memmove)