// SPDX-License-Identifier: GPL-2.0-or-later
/* align.c - handle alignment exceptions for the Power PC.
 *
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 * Copyright (c) 1998-1999 TiVo, Inc.
 *    PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *    PowerPC 403GCX/405GP modifications.
 * Copyright (c) 2001-2002 PPC64 team, IBM Corp
 *    64-bit and Power4 support
 * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
 *                    <benh@kernel.crashing.org>
 *    Merge ppc32 and ppc64 implementations
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>
#include <asm/inst.h>

struct aligninfo {
	unsigned char len;
	unsigned char flags;
};


#define INVALID	{ 0, 0 }

/* Bits in the flags field */
#define LD	0	/* load */
#define ST	1	/* store */
#define SE	2	/* sign-extend value, or FP ld/st as word */
#define SW	0x20	/* byte swap */
#define E4	0x40	/* SPE endianness is word */
#define E8	0x80	/* SPE endianness is double word */

#ifdef CONFIG_SPE

static struct aligninfo spe_aligninfo[32] = {
	{ 8, LD+E8 },		/* 0 00 00: evldd[x] */
	{ 8, LD+E4 },		/* 0 00 01: evldw[x] */
	{ 8, LD },		/* 0 00 10: evldh[x] */
	INVALID,		/* 0 00 11 */
	{ 2, LD },		/* 0 01 00: evlhhesplat[x] */
	INVALID,		/* 0 01 01 */
	{ 2, LD },		/* 0 01 10: evlhhousplat[x] */
	{ 2, LD+SE },		/* 0 01 11: evlhhossplat[x] */
	{ 4, LD },		/* 0 10 00: evlwhe[x] */
	INVALID,		/* 0 10 01 */
	{ 4, LD },		/* 0 10 10: evlwhou[x] */
	{ 4, LD+SE },		/* 0 10 11: evlwhos[x] */
	{ 4, LD+E4 },		/* 0 11 00: evlwwsplat[x] */
	INVALID,		/* 0 11 01 */
	{ 4, LD },		/* 0 11 10: evlwhsplat[x] */
	INVALID,		/* 0 11 11 */

	{ 8, ST+E8 },		/* 1 00 00: evstdd[x] */
	{ 8, ST+E4 },		/* 1 00 01: evstdw[x] */
	{ 8, ST },		/* 1 00 10: evstdh[x] */
	INVALID,		/* 1 00 11 */
	INVALID,		/* 1 01 00 */
	INVALID,		/* 1 01 01 */
	INVALID,		/* 1 01 10 */
	INVALID,		/* 1 01 11 */
	{ 4, ST },		/* 1 10 00: evstwhe[x] */
	INVALID,		/* 1 10 01 */
	{ 4, ST },		/* 1 10 10: evstwho[x] */
	INVALID,		/* 1 10 11 */
	{ 4, ST+E4 },		/* 1 11 00: evstwwe[x] */
	INVALID,		/* 1 11 01 */
	{ 4, ST+E4 },		/* 1 11 10: evstwwo[x] */
	INVALID,		/* 1 11 11 */
};

#define EVLDD		0x00
#define EVLDW		0x01
#define EVLDH		0x02
#define EVLHHESPLAT	0x04
#define EVLHHOUSPLAT	0x06
#define EVLHHOSSPLAT	0x07
#define EVLWHE		0x08
#define EVLWHOU		0x0A
#define EVLWHOS		0x0B
#define EVLWWSPLAT	0x0C
#define EVLWHSPLAT	0x0E
#define EVSTDD		0x10
#define EVSTDW		0x11
#define EVSTDH		0x12
#define EVSTWHE		0x18
#define EVSTWHO		0x1A
#define EVSTWWE		0x1C
#define EVSTWWO		0x1E
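
/*
 * The EV* values above are the spe_aligninfo[] indices produced by the
 * instruction decode in emulate_spe() below.
 */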

/*
 * Emulate SPE loads and stores.
 * Only Book-E has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		       struct ppc_inst ppc_instr)
{
	union {
		u64 ll;
		u32 w[2];
		u16 h[4];
		u8 v[8];
	} data, temp;
	unsigned char __user *p, *addr;
	unsigned long *evr = &current->thread.evr[reg];
	unsigned int nb, flags, instr;

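	/*
	 * The spe_aligninfo[] index is bits 1-5 of the instruction word,
	 * i.e. the low bits of the EVX extended opcode; it matches the
	 * EV* constants defined above.
	 */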
	instr = ppc_inst_val(ppc_instr);
	instr = (instr >> 1) & 0x1f;

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

	nb = spe_aligninfo[instr].len;
	flags = spe_aligninfo[instr].flags;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_spe_to_thread(current);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
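	/*
	 * SPE extends each 32-bit GPR to 64 bits, with the upper halves
	 * saved in thread.evr[]; so a 64-bit value is handled here as
	 * w[0] = evr (high word) and w[1] = GPR (low word).
	 */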
	if (flags & ST) {
		data.ll = 0;
		switch (instr) {
		case EVSTDD:
		case EVSTDW:
		case EVSTDH:
			data.w[0] = *evr;
			data.w[1] = regs->gpr[reg];
			break;
		case EVSTWHE:
			data.h[2] = *evr >> 16;
			data.h[3] = regs->gpr[reg] >> 16;
			break;
		case EVSTWHO:
			data.h[2] = *evr & 0xffff;
			data.h[3] = regs->gpr[reg] & 0xffff;
			break;
		case EVSTWWE:
			data.w[1] = *evr;
			break;
		case EVSTWWO:
			data.w[1] = regs->gpr[reg];
			break;
		default:
			return -EINVAL;
		}
	} else {
		temp.ll = data.ll = 0;
		p = addr;

		if (!user_read_access_begin(addr, nb))
			return -EFAULT;

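		/*
		 * The fallthroughs below mean an nb-byte access fills the
		 * last nb bytes of temp: v[4..7] alias w[1] and v[6..7]
		 * alias h[3], which is what the per-instruction fixup
		 * further down expects.
		 */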
		switch (nb) {
		case 8:
			unsafe_get_user(temp.v[0], p++, Efault_read);
			unsafe_get_user(temp.v[1], p++, Efault_read);
			unsafe_get_user(temp.v[2], p++, Efault_read);
			unsafe_get_user(temp.v[3], p++, Efault_read);
			fallthrough;
		case 4:
			unsafe_get_user(temp.v[4], p++, Efault_read);
			unsafe_get_user(temp.v[5], p++, Efault_read);
			fallthrough;
		case 2:
			unsafe_get_user(temp.v[6], p++, Efault_read);
			unsafe_get_user(temp.v[7], p++, Efault_read);
		}
		user_read_access_end();

		switch (instr) {
		case EVLDD:
		case EVLDW:
		case EVLDH:
			data.ll = temp.ll;
			break;
		case EVLHHESPLAT:
			data.h[0] = temp.h[3];
			data.h[2] = temp.h[3];
			break;
		case EVLHHOUSPLAT:
		case EVLHHOSSPLAT:
			data.h[1] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		case EVLWHE:
			data.h[0] = temp.h[2];
			data.h[2] = temp.h[3];
			break;
		case EVLWHOU:
		case EVLWHOS:
			data.h[1] = temp.h[2];
			data.h[3] = temp.h[3];
			break;
		case EVLWWSPLAT:
			data.w[0] = temp.w[1];
			data.w[1] = temp.w[1];
			break;
		case EVLWHSPLAT:
			data.h[0] = temp.h[2];
			data.h[1] = temp.h[2];
			data.h[2] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		default:
			return -EINVAL;
		}
	}

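	/*
	 * SW asks for a byte swap; E8/E4 give the element size to swap
	 * within (64-bit or 32-bit, otherwise 16-bit halfwords).
	 */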
	if (flags & SW) {
		switch (flags & 0xf0) {
		case E8:
			data.ll = swab64(data.ll);
			break;
		case E4:
			data.w[0] = swab32(data.w[0]);
			data.w[1] = swab32(data.w[1]);
			break;
		/* It's half-word endian */
		default:
			data.h[0] = swab16(data.h[0]);
			data.h[1] = swab16(data.h[1]);
			data.h[2] = swab16(data.h[2]);
			data.h[3] = swab16(data.h[3]);
			break;
		}
	}

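	/* Sign-extend each loaded halfword into the corresponding word. */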
	if (flags & SE) {
		data.w[0] = (s16)data.h[1];
		data.w[1] = (s16)data.h[3];
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		p = addr;

		if (!user_write_access_begin(addr, nb))
			return -EFAULT;

		switch (nb) {
		case 8:
			unsafe_put_user(data.v[0], p++, Efault_write);
			unsafe_put_user(data.v[1], p++, Efault_write);
			unsafe_put_user(data.v[2], p++, Efault_write);
			unsafe_put_user(data.v[3], p++, Efault_write);
			fallthrough;
		case 4:
			unsafe_put_user(data.v[4], p++, Efault_write);
			unsafe_put_user(data.v[5], p++, Efault_write);
			fallthrough;
		case 2:
			unsafe_put_user(data.v[6], p++, Efault_write);
			unsafe_put_user(data.v[7], p++, Efault_write);
		}
		user_write_access_end();
	} else {
		*evr = data.w[0];
		regs->gpr[reg] = data.w[1];
	}

	return 1;

Efault_read:
	user_read_access_end();
	return -EFAULT;

Efault_write:
	user_write_access_end();
	return -EFAULT;
}
#endif /* CONFIG_SPE */

/*
 * Called on alignment exception. Attempts to fix up the access that
 * caused it.
 *
 * Return 1 on success
 * Return 0 if unable to handle the interrupt
 * Return -EFAULT if the data address is bad
 * Other negative return values indicate that the instruction can't
 * be emulated, and the process should be given a SIGBUS.
 */

int fix_alignment(struct pt_regs *regs)
{
	struct ppc_inst instr;
	struct instruction_op op;
	int r, type;

	/*
	 * We require a complete register set, if not, then our assembly
	 * is broken
	 */
	CHECK_FULL_REGS(regs);

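	/*
	 * Fetch the faulting instruction: straight from kernel text if the
	 * exception came from kernel mode, otherwise from user memory.
	 */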
	if (is_kernel_addr(regs->nip))
		r = probe_kernel_read_inst(&instr, (void *)regs->nip);
	else
		r = __get_user_instr(instr, (void __user *)regs->nip);

	if (unlikely(r))
		return -EFAULT;
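	/*
	 * If the faulting context ran with the opposite endianness to the
	 * kernel, the instruction image just fetched is byte-reversed from
	 * our point of view; swab it before decoding.
	 */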
	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
		/* We don't handle PPC little-endian any more... */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			return -EIO;
		instr = ppc_inst_swab(instr);
	}

#ifdef CONFIG_SPE
	if (ppc_inst_primary_opcode(instr) == 0x4) {
		int reg = (ppc_inst_val(instr) >> 21) & 0x1f;
		PPC_WARN_ALIGNMENT(spe, regs);
		return emulate_spe(regs, reg, instr);
	}
#endif


	/*
	 * ISA 3.0 (such as P9) copy, copy_first, paste and paste_last alignment
	 * check.
	 *
	 * Send a SIGBUS to the process that caused the fault.
	 *
	 * We do not emulate these because paste may contain additional metadata
	 * when pasting to a co-processor. Furthermore, paste_last is the
	 * synchronisation point for preceding copy/paste sequences.
	 */
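	/*
	 * The mask ignores the register fields and the bits that
	 * distinguish the copy/paste variants from one another, so a
	 * single compare catches all of them.
	 */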
	if ((ppc_inst_val(instr) & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
		return -EIO;

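	/*
	 * Let the generic emulation code in sstep.c decode the instruction;
	 * the code below only fixes up plain loads/stores and dcbz.
	 */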
	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return -EINVAL;

	type = GETTYPE(op.type);
	if (!OP_IS_LOAD_STORE(type)) {
		if (op.type != CACHEOP + DCBZ)
			return -EINVAL;
		PPC_WARN_ALIGNMENT(dcbz, regs);
		r = emulate_dcbz(op.ea, regs);
	} else {
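		/*
		 * An unaligned larx/stcx. cannot be emulated with ordinary
		 * accesses without losing the reservation, so let the
		 * process take a SIGBUS instead.
		 */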
		if (type == LARX || type == STCX)
			return -EIO;
		PPC_WARN_ALIGNMENT(unaligned, regs);
		r = emulate_loadstore(regs, &op);
	}

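	/*
	 * emulate_dcbz()/emulate_loadstore() return 0 on success; translate
	 * that into this function's "1 == fixed up" convention.
	 */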
	if (!r)
		return 1;
	return r;
}