/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * x86_64/AVX2/AES-NI assembler implementation of Camellia
 *
 * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 */

#include <linux/linkage.h>
#include <asm/frame.h>

#define CAMELLIA_TABLE_BYTE_LEN 272

/* struct camellia_ctx: */
#define key_table 0
#define key_length CAMELLIA_TABLE_BYTE_LEN
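/*
 * The offsets above mirror the C-side context structure, which looks
 * roughly like this (sketch only; see the camellia glue code for the
 * authoritative definition):
 *
 *	struct camellia_ctx {
 *		u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
 *		u32 key_length;
 *	};
 */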

/* register macros */
#define CTX %rdi
#define RIO %r8

/**********************************************************************
  helper macros
 **********************************************************************/
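/*
 * filter_8bit applies an 8-bit -> 8-bit transform to every byte of x
 * using two 16-entry vpshufb lookups: the low nibble indexes lo_t, the
 * high nibble (masked off and shifted down) indexes hi_t, and the two
 * halves are XORed.  Per byte, roughly (illustrative C sketch, not part
 * of the build):
 *
 *	out = lo_t[in & 0x0f] ^ hi_t[in >> 4];
 *
 * The split is exact because all of the Camellia pre-/post-S-box
 * filters below are affine over GF(2).
 */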
#define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
	vpand x, mask4bit, tmp0; \
	vpandn x, mask4bit, x; \
	vpsrld $4, x, x; \
	\
	vpshufb tmp0, lo_t, tmp0; \
	vpshufb x, hi_t, x; \
	vpxor tmp0, x, x;

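/*
 * The ymmN_x aliases name the low 128-bit (xmm) halves of the ymm
 * registers; roundsm32 below token-pastes "reg##_x" to drop to the
 * 128-bit forms, since AES-NI (vaesenclast) operates on xmm registers
 * only and the 256-bit state has to be processed one half at a time
 * via vextracti128/vinserti128.
 */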
#define ymm0_x xmm0
#define ymm1_x xmm1
#define ymm2_x xmm2
#define ymm3_x xmm3
#define ymm4_x xmm4
#define ymm5_x xmm5
#define ymm6_x xmm6
#define ymm7_x xmm7
#define ymm8_x xmm8
#define ymm9_x xmm9
#define ymm10_x xmm10
#define ymm11_x xmm11
#define ymm12_x xmm12
#define ymm13_x xmm13
#define ymm14_x xmm14
#define ymm15_x xmm15

/**********************************************************************
  32-way camellia
 **********************************************************************/

/*
 * IN:
 *   x0..x7: byte-sliced AB state
 *   mem_cd: register pointer storing CD state
 *   key: index for key material
 * OUT:
 *   x0..x7: new byte-sliced CD state
 */
#define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
		  t7, mem_cd, key) \
	/* \
	 * S-function with AES subbytes \
	 */ \
	vbroadcasti128 .Linv_shift_row, t4; \
	vpbroadcastd .L0f0f0f0f, t7; \
	vbroadcasti128 .Lpre_tf_lo_s1, t5; \
	vbroadcasti128 .Lpre_tf_hi_s1, t6; \
	vbroadcasti128 .Lpre_tf_lo_s4, t2; \
	vbroadcasti128 .Lpre_tf_hi_s4, t3; \
	\
	/* AES inverse shift rows */ \
	vpshufb t4, x0, x0; \
	vpshufb t4, x7, x7; \
	vpshufb t4, x3, x3; \
	vpshufb t4, x6, x6; \
	vpshufb t4, x2, x2; \
	vpshufb t4, x5, x5; \
	vpshufb t4, x1, x1; \
	vpshufb t4, x4, x4; \
	\
	/* prefilter sboxes 1, 2 and 3 */ \
	/* prefilter sbox 4 */ \
	filter_8bit(x0, t5, t6, t7, t4); \
	filter_8bit(x7, t5, t6, t7, t4); \
	vextracti128 $1, x0, t0##_x; \
	vextracti128 $1, x7, t1##_x; \
	filter_8bit(x3, t2, t3, t7, t4); \
	filter_8bit(x6, t2, t3, t7, t4); \
	vextracti128 $1, x3, t3##_x; \
	vextracti128 $1, x6, t2##_x; \
	filter_8bit(x2, t5, t6, t7, t4); \
	filter_8bit(x5, t5, t6, t7, t4); \
	filter_8bit(x1, t5, t6, t7, t4); \
	filter_8bit(x4, t5, t6, t7, t4); \
	\
	vpxor t4##_x, t4##_x, t4##_x; \
	\
	/* AES subbytes + AES shift rows */ \
	vextracti128 $1, x2, t6##_x; \
	vextracti128 $1, x5, t5##_x; \
	vaesenclast t4##_x, x0##_x, x0##_x; \
	vaesenclast t4##_x, t0##_x, t0##_x; \
	vinserti128 $1, t0##_x, x0, x0; \
	vaesenclast t4##_x, x7##_x, x7##_x; \
	vaesenclast t4##_x, t1##_x, t1##_x; \
	vinserti128 $1, t1##_x, x7, x7; \
	vaesenclast t4##_x, x3##_x, x3##_x; \
	vaesenclast t4##_x, t3##_x, t3##_x; \
	vinserti128 $1, t3##_x, x3, x3; \
	vaesenclast t4##_x, x6##_x, x6##_x; \
	vaesenclast t4##_x, t2##_x, t2##_x; \
	vinserti128 $1, t2##_x, x6, x6; \
	vextracti128 $1, x1, t3##_x; \
	vextracti128 $1, x4, t2##_x; \
	vbroadcasti128 .Lpost_tf_lo_s1, t0; \
	vbroadcasti128 .Lpost_tf_hi_s1, t1; \
	vaesenclast t4##_x, x2##_x, x2##_x; \
	vaesenclast t4##_x, t6##_x, t6##_x; \
	vinserti128 $1, t6##_x, x2, x2; \
	vaesenclast t4##_x, x5##_x, x5##_x; \
	vaesenclast t4##_x, t5##_x, t5##_x; \
	vinserti128 $1, t5##_x, x5, x5; \
	vaesenclast t4##_x, x1##_x, x1##_x; \
	vaesenclast t4##_x, t3##_x, t3##_x; \
	vinserti128 $1, t3##_x, x1, x1; \
	vaesenclast t4##_x, x4##_x, x4##_x; \
	vaesenclast t4##_x, t2##_x, t2##_x; \
	vinserti128 $1, t2##_x, x4, x4; \
	\
	/* postfilter sboxes 1 and 4 */ \
	vbroadcasti128 .Lpost_tf_lo_s3, t2; \
	vbroadcasti128 .Lpost_tf_hi_s3, t3; \
	filter_8bit(x0, t0, t1, t7, t6); \
	filter_8bit(x7, t0, t1, t7, t6); \
	filter_8bit(x3, t0, t1, t7, t6); \
	filter_8bit(x6, t0, t1, t7, t6); \
	\
	/* postfilter sbox 3 */ \
	vbroadcasti128 .Lpost_tf_lo_s2, t4; \
	vbroadcasti128 .Lpost_tf_hi_s2, t5; \
	filter_8bit(x2, t2, t3, t7, t6); \
	filter_8bit(x5, t2, t3, t7, t6); \
	\
	vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \
	\
	/* postfilter sbox 2 */ \
	filter_8bit(x1, t4, t5, t7, t2); \
	filter_8bit(x4, t4, t5, t7, t2); \
	vpxor t7, t7, t7; \
	\
	vpsrldq $1, t0, t1; \
	vpsrldq $2, t0, t2; \
	vpshufb t7, t1, t1; \
	vpsrldq $3, t0, t3; \
	\
	/* P-function */ \
	vpxor x5, x0, x0; \
	vpxor x6, x1, x1; \
	vpxor x7, x2, x2; \
	vpxor x4, x3, x3; \
	\
	vpshufb t7, t2, t2; \
	vpsrldq $4, t0, t4; \
	vpshufb t7, t3, t3; \
	vpsrldq $5, t0, t5; \
	vpshufb t7, t4, t4; \
	\
	vpxor x2, x4, x4; \
	vpxor x3, x5, x5; \
	vpxor x0, x6, x6; \
	vpxor x1, x7, x7; \
	\
	vpsrldq $6, t0, t6; \
	vpshufb t7, t5, t5; \
	vpshufb t7, t6, t6; \
	\
	vpxor x7, x0, x0; \
	vpxor x4, x1, x1; \
	vpxor x5, x2, x2; \
	vpxor x6, x3, x3; \
	\
	vpxor x3, x4, x4; \
	vpxor x0, x5, x5; \
	vpxor x1, x6, x6; \
	vpxor x2, x7, x7; /* note: high and low parts swapped */ \
	\
	/* Add key material and result to CD (x becomes new CD) */ \
	\
	vpxor t6, x1, x1; \
	vpxor 5 * 32(mem_cd), x1, x1; \
	\
	vpsrldq $7, t0, t6; \
	vpshufb t7, t0, t0; \
	vpshufb t7, t6, t7; \
	\
	vpxor t7, x0, x0; \
	vpxor 4 * 32(mem_cd), x0, x0; \
	\
	vpxor t5, x2, x2; \
	vpxor 6 * 32(mem_cd), x2, x2; \
	\
	vpxor t4, x3, x3; \
	vpxor 7 * 32(mem_cd), x3, x3; \
	\
	vpxor t3, x4, x4; \
	vpxor 0 * 32(mem_cd), x4, x4; \
	\
	vpxor t2, x5, x5; \
	vpxor 1 * 32(mem_cd), x5, x5; \
	\
	vpxor t1, x6, x6; \
	vpxor 2 * 32(mem_cd), x6, x6; \
	\
	vpxor t0, x7, x7; \
	vpxor 3 * 32(mem_cd), x7, x7;

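/*
 * In rough C terms, one roundsm32 invocation computes (illustrative
 * sketch of the macro above, applied per byte slice):
 *
 *	x = CD ^ key ^ P(S(x));
 *
 * where S is the Camellia S-function, evaluated here with AES SubBytes
 * (vaesenclast with an all-zero round key) wrapped in the pre-/post-
 * filters, and P is Camellia's linear byte-diffusion layer (the vpxor
 * chains).
 */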
/*
 * Size optimization... with inlined roundsm32 the binary would be over
 * 5 times larger and only marginally faster.
 */
.align 8
SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
	roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		  %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
		  %rcx, (%r9));
	RET;
SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)

.align 8
SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
	roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
		  %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
		  %rax, (%r9));
	RET;
SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)

/*
 * IN/OUT:
 *  x0..x7: byte-sliced AB state preloaded
 *  mem_ab: byte-sliced AB state in memory
 *  mem_cd: byte-sliced CD state in memory
 */
#define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, i, dir, store_ab) \
	leaq (key_table + (i) * 8)(CTX), %r9; \
	call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
	\
	vmovdqu x0, 4 * 32(mem_cd); \
	vmovdqu x1, 5 * 32(mem_cd); \
	vmovdqu x2, 6 * 32(mem_cd); \
	vmovdqu x3, 7 * 32(mem_cd); \
	vmovdqu x4, 0 * 32(mem_cd); \
	vmovdqu x5, 1 * 32(mem_cd); \
	vmovdqu x6, 2 * 32(mem_cd); \
	vmovdqu x7, 3 * 32(mem_cd); \
	\
	leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \
	call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
	\
	store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab);

#define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */

#define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \
	/* Store new AB state */ \
	vmovdqu x4, 4 * 32(mem_ab); \
	vmovdqu x5, 5 * 32(mem_ab); \
	vmovdqu x6, 6 * 32(mem_ab); \
	vmovdqu x7, 7 * 32(mem_ab); \
	vmovdqu x0, 0 * 32(mem_ab); \
	vmovdqu x1, 1 * 32(mem_ab); \
	vmovdqu x2, 2 * 32(mem_ab); \
	vmovdqu x3, 3 * 32(mem_ab);

#define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, mem_ab, mem_cd, i) \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store);

#define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, mem_ab, mem_cd, i) \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store);

/*
 * IN:
 *  v0..3: byte-sliced 32-bit integers
 * OUT:
 *  v0..3: (IN <<< 1)
 */
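/*
 * AVX2 has no byte-granular shift with carry, so the rotate is built by
 * hand: vpcmpgtb against the all-zero register yields 0xff for bytes
 * whose top bit is set, vpabsb reduces that mask to 0x01, vpaddb doubles
 * every byte (a left shift by one), and the saved carry bits are OR'ed
 * into the next register in the chain v0 -> v1 -> v2 -> v3, with v3's
 * carry wrapping around to v0.
 */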
#define rol32_1_32(v0, v1, v2, v3, t0, t1, t2, zero) \
	vpcmpgtb v0, zero, t0; \
	vpaddb v0, v0, v0; \
	vpabsb t0, t0; \
	\
	vpcmpgtb v1, zero, t1; \
	vpaddb v1, v1, v1; \
	vpabsb t1, t1; \
	\
	vpcmpgtb v2, zero, t2; \
	vpaddb v2, v2, v2; \
	vpabsb t2, t2; \
	\
	vpor t0, v1, v1; \
	\
	vpcmpgtb v3, zero, t0; \
	vpaddb v3, v3, v3; \
	vpabsb t0, t0; \
	\
	vpor t1, v2, v2; \
	vpor t2, v3, v3; \
	vpor t0, v0, v0;

/*
 * IN:
 *  l: byte-sliced AB state in memory
 *  r: byte-sliced CD state in memory
 * OUT:
 *  l, r: updated byte-sliced AB and CD states in memory
 */
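/*
 * This is Camellia's FL/FL^-1 layer on the byte-sliced state: FL on the
 * l half, FL^-1 on the r half.  Per 32-bit word, matching the inline
 * pseudo-code below (illustrative sketch):
 *
 *	lr ^= rol32(ll & kll, 1);
 *	ll ^= (lr | klr);
 *	rl ^= (rr | krr);
 *	rr ^= rol32(rl & krl, 1);
 */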
#define fls32(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \
	      tt1, tt2, tt3, kll, klr, krl, krr) \
	/* \
	 * t0 = kll; \
	 * t0 &= ll; \
	 * lr ^= rol32(t0, 1); \
	 */ \
	vpbroadcastd kll, t0; /* only lowest 32-bit used */ \
	vpxor tt0, tt0, tt0; \
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpand l0, t0, t0; \
	vpand l1, t1, t1; \
	vpand l2, t2, t2; \
	vpand l3, t3, t3; \
	\
	rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \
	\
	vpxor l4, t0, l4; \
	vpbroadcastd krr, t0; /* only lowest 32-bit used */ \
	vmovdqu l4, 4 * 32(l); \
	vpxor l5, t1, l5; \
	vmovdqu l5, 5 * 32(l); \
	vpxor l6, t2, l6; \
	vmovdqu l6, 6 * 32(l); \
	vpxor l7, t3, l7; \
	vmovdqu l7, 7 * 32(l); \
	\
	/* \
	 * t2 = krr; \
	 * t2 |= rr; \
	 * rl ^= t2; \
	 */ \
	\
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpor 4 * 32(r), t0, t0; \
	vpor 5 * 32(r), t1, t1; \
	vpor 6 * 32(r), t2, t2; \
	vpor 7 * 32(r), t3, t3; \
	\
	vpxor 0 * 32(r), t0, t0; \
	vpxor 1 * 32(r), t1, t1; \
	vpxor 2 * 32(r), t2, t2; \
	vpxor 3 * 32(r), t3, t3; \
	vmovdqu t0, 0 * 32(r); \
	vpbroadcastd krl, t0; /* only lowest 32-bit used */ \
	vmovdqu t1, 1 * 32(r); \
	vmovdqu t2, 2 * 32(r); \
	vmovdqu t3, 3 * 32(r); \
	\
	/* \
	 * t2 = krl; \
	 * t2 &= rl; \
	 * rr ^= rol32(t2, 1); \
	 */ \
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpand 0 * 32(r), t0, t0; \
	vpand 1 * 32(r), t1, t1; \
	vpand 2 * 32(r), t2, t2; \
	vpand 3 * 32(r), t3, t3; \
	\
	rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \
	\
	vpxor 4 * 32(r), t0, t0; \
	vpxor 5 * 32(r), t1, t1; \
	vpxor 6 * 32(r), t2, t2; \
	vpxor 7 * 32(r), t3, t3; \
	vmovdqu t0, 4 * 32(r); \
	vpbroadcastd klr, t0; /* only lowest 32-bit used */ \
	vmovdqu t1, 5 * 32(r); \
	vmovdqu t2, 6 * 32(r); \
	vmovdqu t3, 7 * 32(r); \
	\
	/* \
	 * t0 = klr; \
	 * t0 |= lr; \
	 * ll ^= t0; \
	 */ \
	\
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpor l4, t0, t0; \
	vpor l5, t1, t1; \
	vpor l6, t2, t2; \
	vpor l7, t3, t3; \
	\
	vpxor l0, t0, l0; \
	vmovdqu l0, 0 * 32(l); \
	vpxor l1, t1, l1; \
	vmovdqu l1, 1 * 32(l); \
	vpxor l2, t2, l2; \
	vmovdqu l2, 2 * 32(l); \
	vpxor l3, t3, l3; \
	vmovdqu l3, 3 * 32(l);

#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x1, x0, x0; \
	\
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x2; \
	\
	vpunpckhqdq t1, x0, x1; \
	vpunpcklqdq t1, x0, x0; \
	\
	vpunpckhqdq x2, t2, x3; \
	vpunpcklqdq x2, t2, x2;

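/*
 * transpose_4x4 is the usual unpack ladder for transposing a 4x4 matrix
 * of 32-bit lanes: vpunpckl/hdq interleave dwords, then vpunpckl/hqdq
 * interleave qwords.  byteslice_16x16b_fast below combines it with a
 * vpshufb through .Lshufb_16x16b to convert 32 loaded blocks into the
 * byte-sliced layout that the round macros operate on.
 */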
#define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \
			      a3, b3, c3, d3, st0, st1) \
	vmovdqu d2, st0; \
	vmovdqu d3, st1; \
	transpose_4x4(a0, a1, a2, a3, d2, d3); \
	transpose_4x4(b0, b1, b2, b3, d2, d3); \
	vmovdqu st0, d2; \
	vmovdqu st1, d3; \
	\
	vmovdqu a0, st0; \
	vmovdqu a1, st1; \
	transpose_4x4(c0, c1, c2, c3, a0, a1); \
	transpose_4x4(d0, d1, d2, d3, a0, a1); \
	\
	vbroadcasti128 .Lshufb_16x16b, a0; \
	vmovdqu st1, a1; \
	vpshufb a0, a2, a2; \
	vpshufb a0, a3, a3; \
	vpshufb a0, b0, b0; \
	vpshufb a0, b1, b1; \
	vpshufb a0, b2, b2; \
	vpshufb a0, b3, b3; \
	vpshufb a0, a1, a1; \
	vpshufb a0, c0, c0; \
	vpshufb a0, c1, c1; \
	vpshufb a0, c2, c2; \
	vpshufb a0, c3, c3; \
	vpshufb a0, d0, d0; \
	vpshufb a0, d1, d1; \
	vpshufb a0, d2, d2; \
	vpshufb a0, d3, d3; \
	vmovdqu d3, st1; \
	vmovdqu st0, d3; \
	vpshufb a0, d3, a0; \
	vmovdqu d2, st0; \
	\
	transpose_4x4(a0, b0, c0, d0, d2, d3); \
	transpose_4x4(a1, b1, c1, d1, d2, d3); \
	vmovdqu st0, d2; \
	vmovdqu st1, d3; \
	\
	vmovdqu b0, st0; \
	vmovdqu b1, st1; \
	transpose_4x4(a2, b2, c2, d2, b0, b1); \
	transpose_4x4(a3, b3, c3, d3, b0, b1); \
	vmovdqu st0, b0; \
	vmovdqu st1, b1; \
	/* does not adjust output bytes inside vectors */

/* load blocks to registers and apply pre-whitening */
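/*
 * Roughly: vpbroadcastq replicates the 64-bit whitening key into every
 * 128-bit lane, .Lpack_bswap byte-swaps it into block byte order while
 * zeroing the other half of each lane, and the vpxor chain applies it
 * to one 64-bit half of each of the 32 blocks (the rest of the
 * whitening material is presumably folded into the round keys by the C
 * key schedule).
 */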
#define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, rio, key) \
	vpbroadcastq key, x0; \
	vpshufb .Lpack_bswap, x0, x0; \
	\
	vpxor 0 * 32(rio), x0, y7; \
	vpxor 1 * 32(rio), x0, y6; \
	vpxor 2 * 32(rio), x0, y5; \
	vpxor 3 * 32(rio), x0, y4; \
	vpxor 4 * 32(rio), x0, y3; \
	vpxor 5 * 32(rio), x0, y2; \
	vpxor 6 * 32(rio), x0, y1; \
	vpxor 7 * 32(rio), x0, y0; \
	vpxor 8 * 32(rio), x0, x7; \
	vpxor 9 * 32(rio), x0, x6; \
	vpxor 10 * 32(rio), x0, x5; \
	vpxor 11 * 32(rio), x0, x4; \
	vpxor 12 * 32(rio), x0, x3; \
	vpxor 13 * 32(rio), x0, x2; \
	vpxor 14 * 32(rio), x0, x1; \
	vpxor 15 * 32(rio), x0, x0;

/* byteslice pre-whitened blocks and store to temporary memory */
#define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd) \
	byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \
			      y4, y5, y6, y7, (mem_ab), (mem_cd)); \
	\
	vmovdqu x0, 0 * 32(mem_ab); \
	vmovdqu x1, 1 * 32(mem_ab); \
	vmovdqu x2, 2 * 32(mem_ab); \
	vmovdqu x3, 3 * 32(mem_ab); \
	vmovdqu x4, 4 * 32(mem_ab); \
	vmovdqu x5, 5 * 32(mem_ab); \
	vmovdqu x6, 6 * 32(mem_ab); \
	vmovdqu x7, 7 * 32(mem_ab); \
	vmovdqu y0, 0 * 32(mem_cd); \
	vmovdqu y1, 1 * 32(mem_cd); \
	vmovdqu y2, 2 * 32(mem_cd); \
	vmovdqu y3, 3 * 32(mem_cd); \
	vmovdqu y4, 4 * 32(mem_cd); \
	vmovdqu y5, 5 * 32(mem_cd); \
	vmovdqu y6, 6 * 32(mem_cd); \
	vmovdqu y7, 7 * 32(mem_cd);

/* de-byteslice, apply post-whitening and store blocks */
#define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \
		    y5, y6, y7, key, stack_tmp0, stack_tmp1) \
	byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \
			      y3, y7, x3, x7, stack_tmp0, stack_tmp1); \
	\
	vmovdqu x0, stack_tmp0; \
	\
	vpbroadcastq key, x0; \
	vpshufb .Lpack_bswap, x0, x0; \
	\
	vpxor x0, y7, y7; \
	vpxor x0, y6, y6; \
	vpxor x0, y5, y5; \
	vpxor x0, y4, y4; \
	vpxor x0, y3, y3; \
	vpxor x0, y2, y2; \
	vpxor x0, y1, y1; \
	vpxor x0, y0, y0; \
	vpxor x0, x7, x7; \
	vpxor x0, x6, x6; \
	vpxor x0, x5, x5; \
	vpxor x0, x4, x4; \
	vpxor x0, x3, x3; \
	vpxor x0, x2, x2; \
	vpxor x0, x1, x1; \
	vpxor stack_tmp0, x0, x0;

#define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, rio) \
	vmovdqu x0, 0 * 32(rio); \
	vmovdqu x1, 1 * 32(rio); \
	vmovdqu x2, 2 * 32(rio); \
	vmovdqu x3, 3 * 32(rio); \
	vmovdqu x4, 4 * 32(rio); \
	vmovdqu x5, 5 * 32(rio); \
	vmovdqu x6, 6 * 32(rio); \
	vmovdqu x7, 7 * 32(rio); \
	vmovdqu y0, 8 * 32(rio); \
	vmovdqu y1, 9 * 32(rio); \
	vmovdqu y2, 10 * 32(rio); \
	vmovdqu y3, 11 * 32(rio); \
	vmovdqu y4, 12 * 32(rio); \
	vmovdqu y5, 13 * 32(rio); \
	vmovdqu y6, 14 * 32(rio); \
	vmovdqu y7, 15 * 32(rio);


.section .rodata.cst32.shufb_16x16b, "aM", @progbits, 32
.align 32
#define SHUFB_BYTES(idx) \
	0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx)
.Lshufb_16x16b:
	.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)
	.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)

.section .rodata.cst32.pack_bswap, "aM", @progbits, 32
.align 32
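/*
 * vpshufb control vector: reverses the bytes within each 32-bit word of
 * the low 64 bits of every 128-bit lane and zeroes the upper 64 bits
 * (control byte 0x80 selects zero).
 */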
.Lpack_bswap:
	.long 0x00010203, 0x04050607, 0x80808080, 0x80808080
	.long 0x00010203, 0x04050607, 0x80808080, 0x80808080

/* NB: section is mergeable, all elements must be aligned 16-byte blocks */
.section .rodata.cst16, "aM", @progbits, 16
.align 16

/*
 * pre-SubByte transform
 *
 * pre-lookup for sbox1, sbox2, sbox3:
 *   swap_bitendianness(
 *       isom_map_camellia_to_aes(
 *           camellia_f(
 *               swap_bitendianness(in)
 *           )
 *       )
 *   )
 *
 * (note: '⊕ 0xc5' inside camellia_f())
 */
.Lpre_tf_lo_s1:
	.byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86
	.byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88
.Lpre_tf_hi_s1:
	.byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a
	.byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23

/*
 * pre-SubByte transform
 *
 * pre-lookup for sbox4:
 *   swap_bitendianness(
 *       isom_map_camellia_to_aes(
 *           camellia_f(
 *               swap_bitendianness(in <<< 1)
 *           )
 *       )
 *   )
 *
 * (note: '⊕ 0xc5' inside camellia_f())
 */
.Lpre_tf_lo_s4:
	.byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25
	.byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74
.Lpre_tf_hi_s4:
	.byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72
	.byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf

/*
 * post-SubByte transform
 *
 * post-lookup for sbox1, sbox4:
 *   swap_bitendianness(
 *       camellia_h(
 *           isom_map_aes_to_camellia(
 *               swap_bitendianness(
 *                   aes_inverse_affine_transform(in)
 *               )
 *           )
 *       )
 *   )
 *
 * (note: '⊕ 0x6e' inside camellia_h())
 */
.Lpost_tf_lo_s1:
	.byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31
	.byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1
.Lpost_tf_hi_s1:
	.byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8
	.byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c

/*
 * post-SubByte transform
 *
 * post-lookup for sbox2:
 *   swap_bitendianness(
 *       camellia_h(
 *           isom_map_aes_to_camellia(
 *               swap_bitendianness(
 *                   aes_inverse_affine_transform(in)
 *               )
 *           )
 *       )
 *   ) <<< 1
 *
 * (note: '⊕ 0x6e' inside camellia_h())
 */
.Lpost_tf_lo_s2:
	.byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62
	.byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3
.Lpost_tf_hi_s2:
	.byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51
	.byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18

/*
 * post-SubByte transform
 *
 * post-lookup for sbox3:
 *   swap_bitendianness(
 *       camellia_h(
 *           isom_map_aes_to_camellia(
 *               swap_bitendianness(
 *                   aes_inverse_affine_transform(in)
 *               )
 *           )
 *       )
 *   ) >>> 1
 *
 * (note: '⊕ 0x6e' inside camellia_h())
 */
.Lpost_tf_lo_s3:
	.byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98
	.byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8
.Lpost_tf_hi_s3:
	.byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54
	.byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06

/*
 * For isolating SubBytes from AESENCLAST: AESENCLAST performs ShiftRows,
 * SubBytes and AddRoundKey, so pre-shuffling the input through this
 * inverse ShiftRows permutation and using an all-zero round key leaves
 * only the SubBytes step.
 */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03

.section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
.align 4
/* 4-bit mask */
.L0f0f0f0f:
	.long 0x0f0f0f0f

.text

.align 8
SYM_FUNC_START_LOCAL(__camellia_enc_blk32)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 512 bytes
	 *	%ymm0..%ymm15: 32 plaintext blocks
	 * output:
	 *	%ymm0..%ymm15: 32 encrypted blocks, order swapped:
	 *	 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
	 */
	FRAME_BEGIN

	leaq 8 * 32(%rax), %rcx;

	inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		      %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		      %ymm15, %rax, %rcx);

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 0);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (8) * 8) + 0)(CTX),
	      ((key_table + (8) * 8) + 4)(CTX),
	      ((key_table + (8) * 8) + 8)(CTX),
	      ((key_table + (8) * 8) + 12)(CTX));

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 8);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (16) * 8) + 0)(CTX),
	      ((key_table + (16) * 8) + 4)(CTX),
	      ((key_table + (16) * 8) + 8)(CTX),
	      ((key_table + (16) * 8) + 12)(CTX));

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 16);

	movl $24, %r8d;
	cmpl $16, key_length(CTX);
	jne .Lenc_max32;
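	/* 128-bit keys run 18 rounds and take the output whitening key at
	 * key_table index 24; 192/256-bit keys fall through to .Lenc_max32
	 * for one more FL/FL^-1 layer and six more rounds, with the
	 * whitening key at index 32. */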

.Lenc_done:
	/* load CD for output */
	vmovdqu 0 * 32(%rcx), %ymm8;
	vmovdqu 1 * 32(%rcx), %ymm9;
	vmovdqu 2 * 32(%rcx), %ymm10;
	vmovdqu 3 * 32(%rcx), %ymm11;
	vmovdqu 4 * 32(%rcx), %ymm12;
	vmovdqu 5 * 32(%rcx), %ymm13;
	vmovdqu 6 * 32(%rcx), %ymm14;
	vmovdqu 7 * 32(%rcx), %ymm15;

	outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		    %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));

	FRAME_END
	RET;

.align 8
.Lenc_max32:
	movl $32, %r8d;

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (24) * 8) + 0)(CTX),
	      ((key_table + (24) * 8) + 4)(CTX),
	      ((key_table + (24) * 8) + 8)(CTX),
	      ((key_table + (24) * 8) + 12)(CTX));

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 24);

	jmp .Lenc_done;
SYM_FUNC_END(__camellia_enc_blk32)

.align 8
SYM_FUNC_START_LOCAL(__camellia_dec_blk32)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 512 bytes
	 *	%r8d: 24 for 16 byte key, 32 for larger
	 *	%ymm0..%ymm15: 32 encrypted blocks
	 * output:
	 *	%ymm0..%ymm15: 32 plaintext blocks, order swapped:
	 *	 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
	 */
	FRAME_BEGIN

	leaq 8 * 32(%rax), %rcx;

	inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		      %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		      %ymm15, %rax, %rcx);

	cmpl $32, %r8d;
	je .Ldec_max32;

.Ldec_max24:
	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 16);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (16) * 8) + 8)(CTX),
	      ((key_table + (16) * 8) + 12)(CTX),
	      ((key_table + (16) * 8) + 0)(CTX),
	      ((key_table + (16) * 8) + 4)(CTX));

	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 8);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (8) * 8) + 8)(CTX),
	      ((key_table + (8) * 8) + 12)(CTX),
	      ((key_table + (8) * 8) + 0)(CTX),
	      ((key_table + (8) * 8) + 4)(CTX));

	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 0);

	/* load CD for output */
	vmovdqu 0 * 32(%rcx), %ymm8;
	vmovdqu 1 * 32(%rcx), %ymm9;
	vmovdqu 2 * 32(%rcx), %ymm10;
	vmovdqu 3 * 32(%rcx), %ymm11;
	vmovdqu 4 * 32(%rcx), %ymm12;
	vmovdqu 5 * 32(%rcx), %ymm13;
	vmovdqu 6 * 32(%rcx), %ymm14;
	vmovdqu 7 * 32(%rcx), %ymm15;

	outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		    %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));

	FRAME_END
	RET;

.align 8
.Ldec_max32:
	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 24);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (24) * 8) + 8)(CTX),
	      ((key_table + (24) * 8) + 12)(CTX),
	      ((key_table + (24) * 8) + 0)(CTX),
	      ((key_table + (24) * 8) + 4)(CTX));

	jmp .Ldec_max24;
SYM_FUNC_END(__camellia_dec_blk32)

SYM_FUNC_START(camellia_ecb_enc_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 */
	FRAME_BEGIN

	vzeroupper;

	inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rdx, (key_table)(CTX));

	/* now dst can be used as temporary buffer (even in src == dst case) */
	movq %rsi, %rax;

	call __camellia_enc_blk32;

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	FRAME_END
	RET;
SYM_FUNC_END(camellia_ecb_enc_32way)

SYM_FUNC_START(camellia_ecb_dec_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 */
	FRAME_BEGIN

	vzeroupper;

	cmpl $16, key_length(CTX);
	movl $32, %r8d;
	movl $24, %eax;
	cmovel %eax, %r8d; /* max */

	inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rdx, (key_table)(CTX, %r8, 8));

	/* now dst can be used as temporary buffer (even in src == dst case) */
	movq %rsi, %rax;

	call __camellia_dec_blk32;

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	FRAME_END
	RET;
SYM_FUNC_END(camellia_ecb_dec_32way)

SYM_FUNC_START(camellia_cbc_dec_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 */
	FRAME_BEGIN
	subq $(16 * 32), %rsp;

	vzeroupper;

	cmpl $16, key_length(CTX);
	movl $32, %r8d;
	movl $24, %eax;
	cmovel %eax, %r8d; /* max */

	inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rdx, (key_table)(CTX, %r8, 8));

	cmpq %rsi, %rdx;
	je .Lcbc_dec_use_stack;

	/* dst can be used as temporary storage, src is not overwritten. */
	movq %rsi, %rax;
	jmp .Lcbc_dec_continue;

.Lcbc_dec_use_stack:
	/*
	 * dst still in-use (because dst == src), so use stack for temporary
	 * storage.
	 */
	movq %rsp, %rax;

.Lcbc_dec_continue:
	call __camellia_dec_blk32;

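	/*
	 * CBC chaining: each output block is XORed with the preceding
	 * ciphertext block.  The vinserti128 below places c0 only in the
	 * high lane of the zeroed %ymm7, so block 1 gets c0 while block 0
	 * (the low lane) passes through unchanged; block 0 is presumably
	 * combined with the IV by the C glue code.
	 */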
	vmovdqu %ymm7, (%rax);
	vpxor %ymm7, %ymm7, %ymm7;
	vinserti128 $1, (%rdx), %ymm7, %ymm7;
	vpxor (%rax), %ymm7, %ymm7;
	vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6;
	vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5;
	vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4;
	vpxor (3 * 32 + 16)(%rdx), %ymm3, %ymm3;
	vpxor (4 * 32 + 16)(%rdx), %ymm2, %ymm2;
	vpxor (5 * 32 + 16)(%rdx), %ymm1, %ymm1;
	vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm0;
	vpxor (7 * 32 + 16)(%rdx), %ymm15, %ymm15;
	vpxor (8 * 32 + 16)(%rdx), %ymm14, %ymm14;
	vpxor (9 * 32 + 16)(%rdx), %ymm13, %ymm13;
	vpxor (10 * 32 + 16)(%rdx), %ymm12, %ymm12;
	vpxor (11 * 32 + 16)(%rdx), %ymm11, %ymm11;
	vpxor (12 * 32 + 16)(%rdx), %ymm10, %ymm10;
	vpxor (13 * 32 + 16)(%rdx), %ymm9, %ymm9;
	vpxor (14 * 32 + 16)(%rdx), %ymm8, %ymm8;
	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	addq $(16 * 32), %rsp;
	FRAME_END
	RET;
SYM_FUNC_END(camellia_cbc_dec_32way)