/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache flushing routines.
 *
 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/28/05 Zoltan Menyhart	Dynamic stride size
 */

#include <asm/asmmacro.h>
#include <asm/export.h>

Linus Torvalds1da177e2005-04-16 15:20:36 -070014
15 /*
16 * flush_icache_range(start,end)
Zoltan Menyhart08357f82005-06-03 05:36:00 -070017 *
18 * Make i-cache(s) coherent with d-caches.
19 *
20 * Must deal with range from start to end-1 but nothing else (need to
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 * be careful not to touch addresses that may be unmapped).
Zoltan Menyhart08357f82005-06-03 05:36:00 -070022 *
23 * Note: "in0" and "in1" are preserved for debugging purposes.
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 */
Prasanna S Panchamukhi1f7ad572005-09-06 15:19:30 -070025 .section .kprobes.text,"ax"
Linus Torvalds1da177e2005-04-16 15:20:36 -070026GLOBAL_ENTRY(flush_icache_range)
Zoltan Menyhart08357f82005-06-03 05:36:00 -070027
Linus Torvalds1da177e2005-04-16 15:20:36 -070028 .prologue
Zoltan Menyhart08357f82005-06-03 05:36:00 -070029 alloc r2=ar.pfs,2,0,0,0
30 movl r3=ia64_i_cache_stride_shift
31 mov r21=1
Linus Torvalds1da177e2005-04-16 15:20:36 -070032 ;;
Zoltan Menyhart08357f82005-06-03 05:36:00 -070033 ld8 r20=[r3] // r20: stride shift
34 sub r22=in1,r0,1 // last byte address
35 ;;
36 shr.u r23=in0,r20 // start / (stride size)
37 shr.u r22=r22,r20 // (last byte address) / (stride size)
38 shl r21=r21,r20 // r21: stride size of the i-cache(s)
39 ;;
40 sub r8=r22,r23 // number of strides - 1
41 shl r24=r23,r20 // r24: addresses for "fc.i" =
42 // "start" rounded down to stride boundary
43 .save ar.lc,r3
44 mov r3=ar.lc // save ar.lc
Linus Torvalds1da177e2005-04-16 15:20:36 -070045 ;;
46
47 .body
Zoltan Menyhart08357f82005-06-03 05:36:00 -070048 mov ar.lc=r8
Linus Torvalds1da177e2005-04-16 15:20:36 -070049 ;;
Zoltan Menyhart08357f82005-06-03 05:36:00 -070050 /*
51 * 32 byte aligned loop, even number of (actually 2) bundles
52 */
53.Loop: fc.i r24 // issuable on M0 only
54 add r24=r21,r24 // we flush "stride size" bytes per iteration
55 nop.i 0
Linus Torvalds1da177e2005-04-16 15:20:36 -070056 br.cloop.sptk.few .Loop
57 ;;
58 sync.i
59 ;;
60 srlz.i
61 ;;
Zoltan Menyhart08357f82005-06-03 05:36:00 -070062 mov ar.lc=r3 // restore ar.lc
Linus Torvalds1da177e2005-04-16 15:20:36 -070063 br.ret.sptk.many rp
64END(flush_icache_range)
Al Viroe007c532016-01-17 01:13:41 -050065EXPORT_SYMBOL_GPL(flush_icache_range)
Fenghua Yu62fdd762008-10-17 12:14:13 -070066
67 /*
68 * clflush_cache_range(start,size)
69 *
70 * Flush cache lines from start to start+size-1.
71 *
72 * Must deal with range from start to start+size-1 but nothing else
73 * (need to be careful not to touch addresses that may be
74 * unmapped).
75 *
76 * Note: "in0" and "in1" are preserved for debugging purposes.
77 */
78 .section .kprobes.text,"ax"
79GLOBAL_ENTRY(clflush_cache_range)
80
81 .prologue
82 alloc r2=ar.pfs,2,0,0,0
83 movl r3=ia64_cache_stride_shift
84 mov r21=1
85 add r22=in1,in0
86 ;;
87 ld8 r20=[r3] // r20: stride shift
88 sub r22=r22,r0,1 // last byte address
89 ;;
90 shr.u r23=in0,r20 // start / (stride size)
91 shr.u r22=r22,r20 // (last byte address) / (stride size)
92 shl r21=r21,r20 // r21: stride size of the i-cache(s)
93 ;;
94 sub r8=r22,r23 // number of strides - 1
95 shl r24=r23,r20 // r24: addresses for "fc" =
96 // "start" rounded down to stride
97 // boundary
98 .save ar.lc,r3
99 mov r3=ar.lc // save ar.lc
100 ;;
101
102 .body
103 mov ar.lc=r8
104 ;;
105 /*
106 * 32 byte aligned loop, even number of (actually 2) bundles
107 */
108.Loop_fc:
109 fc r24 // issuable on M0 only
110 add r24=r21,r24 // we flush "stride size" bytes per iteration
111 nop.i 0
112 br.cloop.sptk.few .Loop_fc
113 ;;
114 sync.i
115 ;;
116 srlz.i
117 ;;
118 mov ar.lc=r3 // restore ar.lc
119 br.ret.sptk.many rp
120END(clflush_cache_range)