/* Optimized version of the standard memset() function.

   Copyright (c) 2002 Hewlett-Packard Co/CERN
	Sverre Jarp <Sverre.Jarp@cern.ch>

   Return: dest

   Inputs:
	in0:	dest
	in1:	value
	in2:	count

   The algorithm is fairly straightforward: set byte by byte until we
   get to a 16B-aligned address, then loop on 128B chunks using an
   early store as prefetching, then loop on 32B chunks, then clear remaining
   words, finally clear remaining bytes.
   Since a stf.spill f0 can store 16B in one go, we use this instruction
   to get peak speed when value = 0.  */
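
/* For orientation, a minimal C-level sketch of the flow implemented below.
   It is illustrative only (the name memset_sketch and its loop structure are
   made up for this comment); the hand-scheduled ia64 code that follows also
   overlaps the line-sized stores with an early store acting as a prefetch,
   and uses stf.spill of f0 for the 16B stores when value == 0.

	#include <stddef.h>
	#include <stdint.h>

	static void *memset_sketch(void *dest, int value, size_t count)
	{
		unsigned char *p = dest;
		uint64_t v = (unsigned char)value;

		v *= 0x0101010101010101ULL;	// replicate the byte 8 times

		// 1. Byte stores until the pointer is 16B-aligned.
		while (count && ((uintptr_t)p & 15)) {
			*p++ = (unsigned char)v;
			count--;
		}
		// 2. 128B cache-line chunks.
		while (count >= 128) {
			for (int i = 0; i < 16; i++)
				((uint64_t *)p)[i] = v;
			p += 128;
			count -= 128;
		}
		// 3. 32B chunks.
		while (count >= 32) {
			for (int i = 0; i < 4; i++)
				((uint64_t *)p)[i] = v;
			p += 32;
			count -= 32;
		}
		// 4. Remaining 8B words.
		while (count >= 8) {
			*(uint64_t *)p = v;
			p += 8;
			count -= 8;
		}
		// 5. Trailing bytes.
		while (count--)
			*p++ = (unsigned char)v;
		return dest;
	}
*/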

#include <asm/asmmacro.h>
#include <asm/export.h>
#undef ret

#define dest		in0
#define value		in1
#define cnt		in2

#define tmp		r31
#define save_lc		r30
#define ptr0		r29
#define ptr1		r28
#define ptr2		r27
#define ptr3		r26
#define ptr9		r24
#define loopcnt		r23
#define linecnt		r22
#define bytecnt		r21

#define fvalue		f6

// This routine uses only scratch predicate registers (p6 - p15)
#define p_scr		p6	// default register for same-cycle branches
#define p_nz		p7
#define p_zr		p8
#define p_unalgn	p9
#define p_y		p11
#define p_n		p12
#define p_yy		p13
#define p_nn		p14

#define MIN1		15
#define MIN1P1HALF	8
#define LINE_SIZE	128
#define LSIZE_SH	7	// shift amount
#define PREF_AHEAD	8

GLOBAL_ENTRY(memset)
{ .mmi
	.prologue
	alloc	tmp = ar.pfs, 3, 0, 0, 0
	lfetch.nt1 [dest]
	.save	ar.lc, save_lc
	mov.i	save_lc = ar.lc
	.body
} { .mmi
	mov	ret0 = dest			// return value
	cmp.ne	p_nz, p_zr = value, r0		// use stf.spill if value is zero
	cmp.eq	p_scr, p0 = cnt, r0
;; }
{ .mmi
	and	ptr2 = -(MIN1+1), dest		// aligned address
	and	tmp = MIN1, dest		// prepare to check for correct alignment
	tbit.nz p_y, p_n = dest, 0		// Do we have an odd address? (M_B_U)
} { .mib
	mov	ptr1 = dest
	mux1	value = value, @brcst		// create 8 identical bytes in word
(p_scr)	br.ret.dpnt.many rp			// return immediately if count = 0
;; }
{ .mib
	cmp.ne	p_unalgn, p0 = tmp, r0
} { .mib
	sub	bytecnt = (MIN1+1), tmp		// NB: # of bytes to move is 1 higher than loopcnt
	cmp.gt	p_scr, p0 = 16, cnt		// is it a minimalistic task?
(p_scr)	br.cond.dptk.many .move_bytes_unaligned	// go move just a few (M_B_U)
;; }
{ .mmi
(p_unalgn) add	ptr1 = (MIN1+1), ptr2		// after alignment
(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2		// after alignment
(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	// should we do a st8 ?
;; }
{ .mib
(p_y)	add	cnt = -8, cnt
(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	// should we do a st4 ?
} { .mib
(p_y)	st8	[ptr2] = value, -4
(p_n)	add	ptr2 = 4, ptr2
;; }
{ .mib
(p_yy)	add	cnt = -4, cnt
(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	// should we do a st2 ?
} { .mib
(p_yy)	st4	[ptr2] = value, -2
(p_nn)	add	ptr2 = 2, ptr2
;; }
{ .mmi
	mov	tmp = LINE_SIZE+1		// for compare
(p_y)	add	cnt = -2, cnt
(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	// should we do a st1 ?
} { .mmi
	setf.sig fvalue = value			// transfer value to FLP side
(p_y)	st2	[ptr2] = value, -1
(p_n)	add	ptr2 = 1, ptr2
;; }

{ .mmi
(p_yy)	st1	[ptr2] = value
	cmp.gt	p_scr, p0 = tmp, cnt		// is it a minimalistic task?
} { .mbb
(p_yy)	add	cnt = -1, cnt
(p_scr)	br.cond.dpnt.many .fraction_of_line	// go move just a few
;; }

{ .mib
	nop.m	0
	shr.u	linecnt = cnt, LSIZE_SH
(p_zr)	br.cond.dptk.many .l1b			// Jump to use stf.spill
;; }

	TEXT_ALIGN(32) // --------------------- // L1A: store ahead into cache lines; fill later
{ .mmi
	and	tmp = -(LINE_SIZE), cnt		// compute end of range
	mov	ptr9 = ptr1			// used for prefetching
	and	cnt = (LINE_SIZE-1), cnt	// remainder
} { .mmi
	mov	loopcnt = PREF_AHEAD-1		// default prefetch loop
	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	// check against actual value
;; }
{ .mmi
(p_scr)	add	loopcnt = -1, linecnt
	add	ptr2 = 8, ptr1			// start of stores (beyond prefetch stores)
	add	ptr1 = tmp, ptr1		// first address beyond total range
;; }
{ .mmi
	add	tmp = -1, linecnt		// next loop count
	mov.i	ar.lc = loopcnt
;; }
.pref_l1a:
{ .mib
	stf8	[ptr9] = fvalue, 128		// Do stores one cache line apart
	nop.i	0
	br.cloop.dptk.few .pref_l1a
;; }
{ .mmi
	add	ptr0 = 16, ptr2			// Two stores in parallel
	mov.i	ar.lc = tmp
;; }
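// Each pass through .l1ax fills the remaining 120 bytes of one 128B line
// (fifteen stf8 stores via ptr2/ptr0); the first 8 bytes of each line were
// already written by the stf8 prefetch store through ptr9.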
.l1ax:
{ .mmi
	stf8	[ptr2] = fvalue, 8
	stf8	[ptr0] = fvalue, 8
;; }
{ .mmi
	stf8	[ptr2] = fvalue, 24
	stf8	[ptr0] = fvalue, 24
;; }
{ .mmi
	stf8	[ptr2] = fvalue, 8
	stf8	[ptr0] = fvalue, 8
;; }
{ .mmi
	stf8	[ptr2] = fvalue, 24
	stf8	[ptr0] = fvalue, 24
;; }
{ .mmi
	stf8	[ptr2] = fvalue, 8
	stf8	[ptr0] = fvalue, 8
;; }
{ .mmi
	stf8	[ptr2] = fvalue, 24
	stf8	[ptr0] = fvalue, 24
;; }
{ .mmi
	stf8	[ptr2] = fvalue, 8
	stf8	[ptr0] = fvalue, 32
	cmp.lt	p_scr, p0 = ptr9, ptr1		// do we need more prefetching?
;; }
{ .mmb
	stf8	[ptr2] = fvalue, 24
(p_scr)	stf8	[ptr9] = fvalue, 128
	br.cloop.dptk.few .l1ax
;; }
{ .mbb
	cmp.le	p_scr, p0 = 8, cnt		// just a few bytes left ?
(p_scr)	br.cond.dpnt.many .fraction_of_line	// Branch no. 2
	br.cond.dpnt.many .move_bytes_from_alignment	// Branch no. 3
;; }

	TEXT_ALIGN(32)
.l1b:	// ------------------------------------ // L1B: store ahead into cache lines; fill later
{ .mmi
	and	tmp = -(LINE_SIZE), cnt		// compute end of range
	mov	ptr9 = ptr1			// used for prefetching
	and	cnt = (LINE_SIZE-1), cnt	// remainder
} { .mmi
	mov	loopcnt = PREF_AHEAD-1		// default prefetch loop
	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	// check against actual value
;; }
{ .mmi
(p_scr)	add	loopcnt = -1, linecnt
	add	ptr2 = 16, ptr1			// start of stores (beyond prefetch stores)
	add	ptr1 = tmp, ptr1		// first address beyond total range
;; }
{ .mmi
	add	tmp = -1, linecnt		// next loop count
	mov.i	ar.lc = loopcnt
;; }
.pref_l1b:
{ .mib
	stf.spill [ptr9] = f0, 128		// Do stores one cache line apart
	nop.i	0
	br.cloop.dptk.few .pref_l1b
;; }
{ .mmi
	add	ptr0 = 16, ptr2			// Two stores in parallel
	mov.i	ar.lc = tmp
;; }
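// Each pass through .l1bx fills the remaining 112 bytes of one 128B line
// (seven stf.spill stores via ptr2/ptr0); the first 16 bytes of each line
// were already written by the stf.spill prefetch store through ptr9.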
.l1bx:
{ .mmi
	stf.spill [ptr2] = f0, 32
	stf.spill [ptr0] = f0, 32
;; }
{ .mmi
	stf.spill [ptr2] = f0, 32
	stf.spill [ptr0] = f0, 32
;; }
{ .mmi
	stf.spill [ptr2] = f0, 32
	stf.spill [ptr0] = f0, 64
	cmp.lt	p_scr, p0 = ptr9, ptr1		// do we need more prefetching?
;; }
{ .mmb
	stf.spill [ptr2] = f0, 32
(p_scr)	stf.spill [ptr9] = f0, 128
	br.cloop.dptk.few .l1bx
;; }
{ .mib
	cmp.gt	p_scr, p0 = 8, cnt		// just a few bytes left ?
(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment
;; }

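// At most one cache line's worth of data remains: store it in 32B chunks,
// then whole words, then the trailing bytes.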
.fraction_of_line:
{ .mib
	add	ptr2 = 16, ptr1
	shr.u	loopcnt = cnt, 5		// loopcnt = cnt / 32
;; }
{ .mib
	cmp.eq	p_scr, p0 = loopcnt, r0
	add	loopcnt = -1, loopcnt
(p_scr)	br.cond.dpnt.many .store_words
;; }
{ .mib
	and	cnt = 0x1f, cnt			// compute the remaining cnt
	mov.i	ar.lc = loopcnt
;; }
	TEXT_ALIGN(32)
.l2:	// ------------------------------------ // L2A: store 32B in 2 cycles
{ .mmb
	stf8	[ptr1] = fvalue, 8
	stf8	[ptr2] = fvalue, 8
;; } { .mmb
	stf8	[ptr1] = fvalue, 24
	stf8	[ptr2] = fvalue, 24
	br.cloop.dptk.many .l2
;; }
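// Store the remaining 8-byte words (at most three), then fall through to
// the byte tail.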
.store_words:
{ .mib
	cmp.gt	p_scr, p0 = 8, cnt		// just a few bytes left ?
(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	// Branch
;; }

{ .mmi
	stf8	[ptr1] = fvalue, 8		// store
	cmp.le	p_y, p_n = 16, cnt
	add	cnt = -8, cnt			// subtract
;; }
{ .mmi
(p_y)	stf8	[ptr1] = fvalue, 8		// store
(p_y)	cmp.le.unc p_yy, p_nn = 16, cnt
(p_y)	add	cnt = -8, cnt			// subtract
;; }
{ .mmi						// store
(p_yy)	stf8	[ptr1] = fvalue, 8
(p_yy)	add	cnt = -8, cnt			// subtract
;; }

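// Fewer than 8 bytes remain: finish with at most one st4, one st2 and one st1.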
.move_bytes_from_alignment:
{ .mib
	cmp.eq	p_scr, p0 = cnt, r0
	tbit.nz.unc p_y, p0 = cnt, 2		// should we terminate with a st4 ?
(p_scr)	br.cond.dpnt.few .restore_and_exit
;; }
{ .mib
(p_y)	st4	[ptr1] = value, 4
	tbit.nz.unc p_yy, p0 = cnt, 1		// should we terminate with a st2 ?
;; }
{ .mib
(p_yy)	st2	[ptr1] = value, 2
	tbit.nz.unc p_y, p0 = cnt, 0		// should we terminate with a st1 ?
;; }

{ .mib
(p_y)	st1	[ptr1] = value
;; }
.restore_and_exit:
{ .mib
	nop.m	0
	mov.i	ar.lc = save_lc
	br.ret.sptk.many rp
;; }

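// Entered when the total count is less than 16: store the bytes directly
// with st1/st2 pairs, without the alignment pass above.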
.move_bytes_unaligned:
{ .mmi
	.pred.rel "mutex", p_y, p_n
	.pred.rel "mutex", p_yy, p_nn
(p_n)	cmp.le	p_yy, p_nn = 4, cnt
(p_y)	cmp.le	p_yy, p_nn = 5, cnt
(p_n)	add	ptr2 = 2, ptr1
} { .mmi
(p_y)	add	ptr2 = 3, ptr1
(p_y)	st1	[ptr1] = value, 1	// fill 1 (odd-aligned) byte [15, 14 (or less) left]
(p_y)	add	cnt = -1, cnt
;; }
{ .mmi
(p_yy)	cmp.le.unc p_y, p0 = 8, cnt
	add	ptr3 = ptr1, cnt	// prepare last store
	mov.i	ar.lc = save_lc
} { .mmi
(p_yy)	st2	[ptr1] = value, 4	// fill 2 (aligned) bytes
(p_yy)	st2	[ptr2] = value, 4	// fill 2 (aligned) bytes [11, 10 (or less) left]
(p_yy)	add	cnt = -4, cnt
;; }
{ .mmi
(p_y)	cmp.le.unc p_yy, p0 = 8, cnt
	add	ptr3 = -1, ptr3		// last store
	tbit.nz p_scr, p0 = cnt, 1	// will there be a st2 at the end ?
} { .mmi
(p_y)	st2	[ptr1] = value, 4	// fill 2 (aligned) bytes
(p_y)	st2	[ptr2] = value, 4	// fill 2 (aligned) bytes [7, 6 (or less) left]
(p_y)	add	cnt = -4, cnt
;; }
{ .mmi
(p_yy)	st2	[ptr1] = value, 4	// fill 2 (aligned) bytes
(p_yy)	st2	[ptr2] = value, 4	// fill 2 (aligned) bytes [3, 2 (or less) left]
	tbit.nz p_y, p0 = cnt, 0	// will there be a st1 at the end ?
} { .mmi
(p_yy)	add	cnt = -4, cnt
;; }
{ .mmb
(p_scr)	st2	[ptr1] = value		// fill 2 (aligned) bytes
(p_y)	st1	[ptr3] = value		// fill last byte (using ptr3)
	br.ret.sptk.many rp
}
END(memset)
EXPORT_SYMBOL(memset)