// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>
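
/*
 * With the SBI call used here, a NULL hart mask is a request to execute
 * FENCE.I on every hart, so this flushes the instruction cache
 * machine-wide rather than just locally.
 */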
void flush_icache_all(void)
{
	sbi_remote_fence_i(NULL);
}

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, e.g. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, hmask, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
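	/*
	 * "local" is now set when the caller asked for a local-only flush
	 * or when no other hart is currently running this mm; a remote
	 * FENCE.I is needed otherwise, or when this mm is not the one
	 * active on this hart.
	 */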
	if (mm != current->active_mm || !local) {
		cpumask_clear(&hmask);
		riscv_cpuid_to_hartid_mask(&others, &hmask);
		sbi_remote_fence_i(hmask.bits);
	} else {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	}

	preempt_enable();
}
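
/*
 * For reference, a minimal sketch of the deferred-flush side that the
 * smp_mb() above pairs with.  The shape below is an assumption based on
 * the comments in flush_icache_mm(); the actual flush_icache_deferred()
 * lives in the context-switch path and may differ in detail:
 *
 *	static void flush_icache_deferred(struct mm_struct *mm)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *		cpumask_t *mask = &mm->context.icache_stale_mask;
 *
 *		if (cpumask_test_cpu(cpu, mask)) {
 *			cpumask_clear_cpu(cpu, mask);
 *			smp_mb();	// pairs with the barrier above
 *			local_flush_icache_all();
 *		}
 *	}
 */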

#endif /* CONFIG_SMP */
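
/*
 * Lazily flush the icache for a page that is about to be mapped
 * executable: PG_dcache_clean is cleared when the page's contents are
 * changed, so the global flush below only runs the first time a dirty
 * page becomes executable.
 */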
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}