/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH	1
#endif

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int op, const int full_page);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));

	return buf;
}

/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c)
		ioc_exists = 1;
	else
		ioc_enable = 0;

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

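/*
 * Decode by example (illustrative values, not from any particular
 * silicon): slc_cfg.sz = 2 and slc_cfg.lsz = 0 in the above would yield
 * p_slc->sz_k = 128 << 2 = 512 (KB) with 128 B lines, i.e. a 512K SLC.
 */
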
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

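/*
 * The ->alias computation above, worked out (hypothetical geometry):
 * 32K I-cache, 2 ways, 8K pages gives 32/2/8 = 2 > 1, i.e. way-size
 * (16K) exceeds page-size so the cache can alias; a 16K 2-way cache
 * gives 16/2/8 = 1, i.e. non-aliasing.
 */
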
/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
 * the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was no new reg was needed in CDU programming
 * model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing
 * meaning more vaddr bits needed to disambiguate the cache-line-op ;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */

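/*
 * Worked example of the "stuffing" (hypothetical addresses): for a 32K,
 * 2-way VIPT I-cache with 8K pages, way-size is 16K so vaddr bit 13 is
 * the lone alias bit, folded in by __cache_line_loop_v2() below as
 *
 *	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 *
 * e.g. paddr 0x8000_1040, vaddr 0x2000_3040: (vaddr >> 13) & 0x1F = 1,
 * so the CDU is handed 0x8000_1041 - bit 0 now carries vaddr bit 13,
 * reusing offset-within-line bits the CDU ignores anyway.
 */
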
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddrs bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

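/*
 * Floor/ceil in the fixup above (and its siblings below), by example
 * (hypothetical values, 64 B lines): paddr = 0x8000_1234, sz = 0x40
 * spans two lines. The fixup gives sz += 0x34 -> 0x74, paddr &= ~0x3f
 * -> 0x8000_1200, and DIV_ROUND_UP(0x74, 0x40) = 2 lines, as required.
 */
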
/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 *  - ARC700 programming model requires paddr and vaddr be passed in separate
 *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
 *    caches actually alias or not.
 *  - For HS38, only the aliasing I-cache configuration uses the PTAG reg
 *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

#ifndef USE_RGN_FLSH

/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher bits
 * need to be written into PTAG_HI
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (op == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#else

/*
 * optimized flush operation which takes a region as opposed to iterating per line
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int s, e;

	/* Only for Non aliasing I-cache in HS38 */
	if (op == OP_INV_IC) {
		s = ARC_REG_IC_IVIR;
		e = ARC_REG_IC_ENDR;
	} else {
		s = ARC_REG_DC_STARTR;
		e = ARC_REG_DC_ENDR;
	}

	if (!full_page) {
		/* for any leading gap between @paddr and start of cache line */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

		/*
		 *  account for any trailing gap to end of cache line
		 *  this is equivalent to DIV_ROUND_UP() in line ops above
		 */
		sz += L1_CACHE_BYTES - 1;
	}

	if (is_pae40_enabled()) {
		/* TBD: check if crossing 4TB boundary */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/* ENDR needs to be set ahead of START */
	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
	write_aux_reg(s, paddr);

	/* caller waits on DC_CTRL.FS */
}

#endif

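/*
 * Worked example of the region setup above (hypothetical values, 64 B
 * lines): paddr = 0x8000_1234, sz = 0x40. Leading gap: sz += 0x34;
 * trailing gap: sz += 0x3f. ENDR gets 0x8000_12b3 and STARTR
 * 0x8000_1200, covering the same two lines a per-line loop would have
 * walked (assuming the line-offset bits of the regs are ignored by
 * hardware, as the SLC region regs below document).
 */
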
#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

#ifndef USE_RGN_FLSH
/*
 * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
 * in the non region flush regime (such as for ARCompact)
 */
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

#else

static inline void __before_dc_op(const int op)
{
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV) {
		val |= DC_CTRL_INV_MODE_FLUSH;
	}

	if (op != OP_INV_IC) {
		/*
		 * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
		 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}
	write_aux_reg(ctl, val);
}

#endif

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

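/*
 * Constant propagation by example: __dc_entire_op(OP_FLUSH) has @op as
 * a compile-time constant, so (in the non region flush build) the
 * OP_INV branch and the IM-bit toggle in __before_dc_op() drop out,
 * leaving just the DC_FLSH write and the flush-status poll in
 * __after_dc_op() in generated code.
 */
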
static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}

static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	__after_dc_op(op);

	local_irq_restore(flags);
}

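/*
 * Note on @full_page above: __builtin_constant_p(sz) && sz == PAGE_SIZE
 * resolves at compile time per call-site; e.g. __dc_line_op_k(paddr,
 * PAGE_SIZE, OP_FLUSH_N_INV) folds full_page to 1, letting the line
 * loop drop its alignment fixup entirely in the generated code.
 */
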
#else

#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif	/* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + l2_line_sz - 1;
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

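/*
 * Worked example of the END computation above (illustrative):
 * l2_line_sz = 128, paddr = 0x8000_0000, sz = 0x40. Without the
 * (l2_line_sz - 1) pad, END = 0x8000_0040 would decode to the same line
 * as START (lower bits being ignored); with it, end = 0x8000_00bf, so
 * END lands on the next line and the one line holding the buffer is
 * operated on.
 */
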
noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(r);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
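/*
 * Congruency by example (hypothetical, 8K pages, 2 cache colors): a
 * page at paddr 0x8000_2000 (color 1) u-mapped at vaddr 0x2000_4000
 * (color 0) is NOT congruent - the two mappings index different cache
 * lines, so a kernel read must first flush the U-mapping's lines.
 */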
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

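/*
 * Two-page straddle in flush_icache_range() above, by example
 * (hypothetical addresses, 8K pages): kstart = 0x7000_1f80, tot_sz =
 * 0x100. Pass 1: off = 0x1f80, sz = 0x80 (rest of first page); pass 2:
 * kstart = 0x7000_2000, sz = 0x80 via the next page's pfn - hence the
 * loop despite tot_sz < PAGE_SIZE.
 */
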
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

/*
 * IO-Coherency (IOC) setup rules:
 *
 * 1. Needs to be at system level, so only once by Master core
 *    Non-Masters need not be accessing caches at that time
 *    - They are either HALT_ON_RESET and kick started much later or
 *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
 *      doesn't perturb caches or coherency unit
 *
 * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
 *    otherwise any straggler data might behave strangely post IOC enabling
 *
 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
 *    Coherency transactions
 */
noinline void __init arc_ioc_setup(void)
{
	unsigned int ap_sz;

	/* Flush + invalidate + disable L1 dcache */
	__dc_disable();

	/* Flush + invalidate SLC */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/* IOC Aperture start: TBD: handle non default CONFIG_LINUX_LINK_BASE */
	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);

	/*
	 * IOC Aperture size:
	 *   decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
	 * TBD: fix for PGU + 1GB of low mem
	 * TBD: fix for PAE
	 */
	ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);

	write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

	/* Re-enable L1 dcache */
	__dc_enable();
}

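/*
 * Aperture size arithmetic above, worked out (illustrative): for 512 MB
 * of memory, arc_get_mem_sz()/1024 = 512K = 2^19, so ap_sz =
 * order_base_2(2^19) - 2 = 17 = 0x11, which decodes back as
 * 2 ^ (0x11 + 2) KB = 512 MB, matching the comment in arc_ioc_setup().
 */
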
/*
 * Cache related boot time checks/setups only needed on master CPU:
 *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
 *    Assume SMP only, so all cores will have same cache config. A check on
 *    one core suffices for all
 *  - IOC setup / dma callbacks only need to be done once
 */
void __init arc_cache_init_master(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/* Note that SLC disable not formally supported till HS 3.0 */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_enable)
		arc_ioc_setup();

	if (is_isa_arcv2() && ioc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}

void __ref arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!cpu)
		arc_cache_init_master();

	/*
	 * In PAE regime, TLB and cache maintenance ops take wider addresses
	 * And even if PAE is not enabled in kernel, the upper 32-bits still need
	 * to be zeroed to keep the ops sane.
	 * As an optimization for more common !PAE enabled case, zero them out
	 * once at init, rather than checking/setting to 0 for every runtime op
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		if (l2_line_sz) {
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}
}