blob: 907fa5d1649440e7f1561740bab54dbce9c9ad40 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Christoph Hellwig92a73bd2020-06-07 21:41:39 -07002#ifndef _ASM_GENERIC_CACHEFLUSH_H
3#define _ASM_GENERIC_CACHEFLUSH_H
Arnd Bergmann5c01b462009-05-13 22:56:36 +00004
/*
 * The cache does not need to be flushed when TLB entries change when
 * the cache is mapped to physical memory, not virtual memory.
 */
#ifndef flush_cache_all
/*
 * Generic no-op: per the comment above, a physically-mapped cache needs no
 * flushing here.  An architecture that does need one defines the
 * flush_cache_all macro before this header is processed.
 */
static inline void flush_cache_all(void)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070014
#ifndef flush_cache_mm
/* No-op fallback; architectures override by defining flush_cache_mm. */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070020
#ifndef flush_cache_dup_mm
/* No-op fallback; architectures override by defining flush_cache_dup_mm. */
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070026
#ifndef flush_cache_range
/*
 * No-op fallback for the [start, end) user-address range flush;
 * architectures override by defining flush_cache_range.
 */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070034
#ifndef flush_cache_page
/*
 * No-op fallback for the single-page flush (user address @vmaddr backed by
 * @pfn); architectures override by defining flush_cache_page.
 */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070042
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/* No-op fallback; an architecture with a real implementation defines
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE to 1 and provides its own function. */
static inline void flush_dcache_page(struct page *page)
{
}
/* 0 advertises to generic code that flush_dcache_page() is this no-op. */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070049
Christoph Hellwig76b3b582020-06-07 21:41:45 -070050
#ifndef flush_dcache_mmap_lock
/* No-op fallback; architectures override by defining flush_dcache_mmap_lock. */
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070056
#ifndef flush_dcache_mmap_unlock
/* No-op counterpart to flush_dcache_mmap_lock() above. */
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070062
#ifndef flush_icache_range
/*
 * No-op fallback for the kernel-address [start, end) icache flush;
 * architectures override by defining flush_icache_range.
 */
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070068
#ifndef flush_icache_user_range
/* By default the user-address variant is the same as flush_icache_range. */
#define flush_icache_user_range flush_icache_range
#endif
72
#ifndef flush_icache_page
/* No-op fallback; architectures override by defining flush_icache_page. */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070079
#ifndef flush_icache_user_page
/*
 * No-op fallback, called from copy_to_user_page() below after @len bytes
 * have been written at user address @addr in @page; architectures override
 * by defining flush_icache_user_page.
 */
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long addr, int len)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070087
#ifndef flush_cache_vmap
/* No-op fallback; architectures override by defining flush_cache_vmap. */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif
Qian Caic296d4d2019-07-16 16:27:06 -070093
#ifndef flush_cache_vunmap
/* No-op counterpart to flush_cache_vmap() above. */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif
Arnd Bergmann5c01b462009-05-13 22:56:36 +000099
#ifndef copy_to_user_page
/*
 * Generic fallback: plain memcpy() into the page, then let the (possibly
 * no-op) flush_icache_user_page() make the new bytes visible to the icache.
 * do { } while (0) keeps the multi-statement macro safe in if/else bodies.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_page(vma, page, vaddr, len);	\
	} while (0)
#endif
107
#ifndef copy_from_user_page
/* Generic fallback: reading needs no cache maintenance, just memcpy(). */
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)
#endif
Arnd Bergmann5c01b462009-05-13 22:56:36 +0000112
Christoph Hellwig92a73bd2020-06-07 21:41:39 -0700113#endif /* _ASM_GENERIC_CACHEFLUSH_H */