// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>

/*
 * This ought to be kept in sync with the powerpc-specific do_page_fault()
 * function. Fortunately, there are a few of its corner cases that we
 * haven't had to handle here so far.
 */
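/*
 * Resolve a coprocessor fault on @mm at effective address @ea: validate the
 * access encoded in @dsisr against the matching VMA, fault the page in via
 * handle_mm_fault(), and store the raw fault code in @flt. Returns 0 on
 * success, -EFAULT for a bad address or access, or -ENOMEM when out of
 * memory.
 */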
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, vm_fault_t *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;

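	/* Hold the mmap lock for read across the VMA lookup and the fault. */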
	mmap_read_lock(mm);
	ret = -EFAULT;
	vma = find_vma(mm, ea);
	if (!vma)
		goto out_unlock;

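	/*
	 * find_vma() may return a VMA that starts above @ea; as in
	 * do_page_fault(), only a VM_GROWSDOWN stack VMA may be grown
	 * down to cover the address.
	 */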
	if (ea < vma->vm_start) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_unlock;
		if (expand_stack(vma, ea))
			goto out_unlock;
	}

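	/* DSISR_ISSTORE distinguishes a store from a load/execute access. */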
	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * PROT_NONE is covered by the VMA check above, and
		 * hash should get a NOHPTE fault instead of a
		 * PROTFAULT in case fixup is needed for things like
		 * autonuma.
		 */
		if (!radix_enabled())
			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}

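	/*
	 * handle_mm_fault() does the real work; there are no CPU registers
	 * for a coprocessor-generated fault, so a NULL pt_regs is passed.
	 */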
	ret = 0;
	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}

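	/* Record major/minor fault accounting against the calling task. */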
	if (*flt & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

out_unlock:
	mmap_read_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);

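/*
 * Compute the SLB entry (ESID/VSID pair) a coprocessor needs to translate
 * @ea, in the same format the CPU would load into its own SLB. Returns 0
 * and fills in @slb on success, or 1 for an address in no valid region.
 */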
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;

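	/* Page size, segment size and VSID all depend on @ea's region. */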
	switch (get_region_id(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		psize = mmu_vmalloc_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case IO_REGION_ID:
		pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
		psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case LINEAR_MAP_REGION_ID:
		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}
	/* Bad address */
	if (!vsid)
		return 1;

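	/*
	 * Shift the VSID into place, then OR in the protection key, the
	 * page-size encoding (sllp) and the 1T-segment bit.
	 */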
	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);

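/*
 * Invalidate the SLB entries that coprocessors may be caching for @mm:
 * the SPUs' on-chip SLBs and any cxl AFU contexts.
 */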
void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
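/*
 * Usage sketch (illustrative only, not part of this file's API): a
 * coprocessor driver servicing a translation fault would typically fault
 * the page in with copro_handle_mm_fault() and, on a segment miss with the
 * hash MMU, derive the SLB entry to install with copro_calculate_slb().
 * All names below other than the two exported helpers are hypothetical.
 *
 *	static int example_resolve_fault(struct mm_struct *mm, u64 ea, u64 dsisr)
 *	{
 *		struct copro_slb slb;
 *		vm_fault_t flt;
 *		int rc;
 *
 *		rc = copro_handle_mm_fault(mm, ea, dsisr, &flt);
 *		if (rc)
 *			return rc;	(-EFAULT or -ENOMEM)
 *
 *		if (!copro_calculate_slb(mm, ea, &slb))
 *			example_load_slb(&slb);	(hypothetical MMIO write of
 *						 slb.esid/slb.vsid to the unit)
 *		return 0;
 *	}
 */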