/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

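/*
 * This architecture does not implement flush_dcache_page(); core code
 * can test this to skip D-cache maintenance (the stub below is a no-op).
 */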
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

/*
 * The cache doesn't need to be flushed when TLB entries change, because
 * the cache is mapped to physical memory rather than virtual memory, so
 * all of the cache maintenance hooks below are no-ops.
 */
static inline void flush_cache_all(void)
{
}

static inline void flush_cache_mm(struct mm_struct *mm)
{
}

static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}

static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}

static inline void flush_dcache_page(struct page *page)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}

static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}

static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}

static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

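/*
 * Used when the kernel copies data into or out of a user page on another
 * process's behalf (e.g. ptrace()).  copy_to_user_page() calls
 * flush_icache_user_range() to keep the instruction cache coherent for
 * the written range (a no-op here); copy_from_user_page() needs no
 * flushing and is a plain memcpy().
 */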
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)

#endif /* __ASM_CACHEFLUSH_H */