// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

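/*
 * Make the I-cache coherent with the D-cache for [kaddr, kaddr + len).
 * With an aliasing (VIPT) I-cache, clean the D-cache to the PoU and
 * invalidate the whole I-cache; otherwise a ranged clean+invalidate
 * via __flush_icache_range() is sufficient.
 */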
void sync_icache_aliases(void *kaddr, unsigned long len)
{
	unsigned long addr = (unsigned long)kaddr;

	if (icache_is_aliasing()) {
		__clean_dcache_area_pou(kaddr, len);
		__flush_icache_all();
	} else {
		/*
		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
		 * for user mappings.
		 */
		__flush_icache_range(addr, addr + len);
	}
}

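/*
 * Called from copy_to_user_page() below: if the VMA is executable, the
 * instructions written through the kernel alias must be made visible
 * to the I-cache.
 */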
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
	if (vma->vm_flags & VM_EXEC)
		sync_icache_aliases(kaddr, len);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user space"
 * model to handle this.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
}

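/*
 * Ensure a page is I/D coherent when it is about to be mapped executable
 * in user space (see flush_dcache_page() below). The flush is skipped if
 * PG_dcache_clean is already set; compound pages are flushed in full.
 */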
void __sync_icache_dcache(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		sync_icache_aliases(page_address(page),
				    PAGE_SIZE << compound_order(page));
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);

/*
 * This function is called when a page has been modified by the kernel. Mark
 * it as dirty for later flushing when mapped in user space (if executable,
 * see __sync_icache_dcache).
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(__flush_icache_range);

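/*
 * Cache maintenance hooks for persistent memory: clean dirty lines out to
 * the Point of Persistence, or invalidate lines so that stale cached data
 * is not read back.
 */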
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* Ensure order against any prior non-cacheable writes */
	dmb(osh);
	__clean_dcache_area_pop(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

void arch_invalidate_pmem(void *addr, size_t size)
{
	__inval_dcache_area(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif