/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H
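
/*
 * Generic no-op stubs for the kernel's cache management API (see
 * Documentation/core-api/cachetlb.rst).  An architecture that needs real
 * cache maintenance provides its own implementation of a hook and defines
 * a macro of the same name in its asm/cacheflush.h before this header is
 * included; anything left undefined falls back to the no-ops below.
 */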

struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;

/*
 * The cache doesn't need to be flushed when TLB entries change if the
 * cache is indexed by physical rather than virtual addresses, so all of
 * the default implementations below are no-ops.
 */
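
/* Flush the entire CPU cache hierarchy, e.g. when kernel mappings change globally. */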
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

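/* Flush all cached user mappings of an address space before it is torn down. */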
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

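/*
 * Like flush_cache_mm(), but split out so the fork() path (dup_mmap())
 * can be optimized separately on virtually indexed caches.
 */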
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

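/* Flush the cache for a user address range within a single VMA. */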
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

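/*
 * Flush a single user page; @vmaddr is its user virtual address and @pfn
 * the physical frame backing it, so aliasing caches can find both views.
 */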
#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

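/*
 * flush_dcache_page() is called when the kernel reads from or writes to a
 * page cache page, so architectures with aliasing D-caches can keep the
 * kernel and user space views of the page coherent.  An architecture with
 * a real implementation declares it and advertises the fact, roughly like
 * this (sketch of a hypothetical asm/cacheflush.h):
 *
 *	void flush_dcache_page(struct page *page);
 *	#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 *	#include <asm-generic/cacheflush.h>
 */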
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif

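/*
 * Lock/unlock pair taken around walks of mapping->i_mmap by architectures
 * that must iterate over all user mappings while flushing the D-cache.
 */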
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

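/*
 * Make the I-cache coherent with the D-cache after the kernel has written
 * instructions into memory, e.g. when loading a module or patching code.
 */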
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

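/*
 * Variant of flush_icache_range() for instruction ranges written through
 * user space addresses; by default it is simply aliased to it.
 */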
#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif

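/*
 * Per-page I-cache flush hook used when a page cache page is mapped into
 * user space; a no-op on most architectures.
 */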
#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif

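/*
 * Flush the I-cache after the kernel writes @len bytes at @addr into a
 * user page, e.g. when ptrace inserts a breakpoint; this is what
 * copy_to_user_page() below relies on.
 */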
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long addr, int len)
{
}
#endif

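/*
 * Hooks called after a new virtual mapping is established (vmalloc,
 * ioremap) and before one is torn down, for caches that are sensitive
 * to changes in the virtual address space.
 */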
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

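/*
 * Copy data into a user page on behalf of ptrace() and friends: do the
 * memcpy() through the kernel mapping, then flush the I-cache in case
 * the bytes written were instructions.
 */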
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif

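/* The read side of the above; a plain memcpy() suffices in the generic case. */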
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#endif

#endif /* _ASM_GENERIC_CACHEFLUSH_H */