// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

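/*
 * The aliasing variants below remap pages into the COPYPAGE_V6_FROM/
 * COPYPAGE_V6_TO windows (see mm.h) at an offset chosen by
 * CACHE_COLOUR(), so the kernel alias gets the same cache colour as
 * the user mapping.  The SHMLBA check above bounds the number of
 * colours those windows have to cover; v6_lock serialises use of the
 * shared windows.
 */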
static DEFINE_RAW_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from);
	kto = kmap_atomic(to);
	copy_page(kto, kfrom);
	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
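/*
 * The MCRR to c6 below is the ARMv6 "invalidate data cache range"
 * block operation (end address in the first register, start address
 * in the second); it throws away any stale cache lines covering the
 * page's kernel mapping before new data is written through an alias.
 */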
static void discard_old_kernel_data(void *kto)
{
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - 1)
	   : "cc");
}

/*
 * Copy the page, taking account of the cache colour.
 */
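/*
 * Both source and destination are mapped at kernel addresses whose
 * cache colour matches the user address: the copy then reads any
 * data the user wrote through a same-coloured alias and leaves the
 * result in the cache sets the user mapping will index, so no flush
 * is needed afterwards.
 */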
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

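	/*
	 * If the source page's kernel mapping may still hold dirty data
	 * (PG_dcache_clean not yet set), write it back first so the copy
	 * through the coloured alias sees current data.
	 */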
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

	copy_page((void *)kto, (void *)kfrom);

	raw_spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
	clear_page((void *)to);

	raw_spin_unlock(&v6_lock);
}

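/*
 * Default to the non-aliasing versions; v6_userpage_init() below
 * switches to the aliasing versions at boot if the cache is VIPT
 * aliasing.
 */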
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};

static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);