// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/algos.c
 *
 * Algorithm list and algorithm selection for RAID-6
 */

#include <linux/raid/pq.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/module.h>
#include <linux/gfp.h>
#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
#endif

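/*
 * The gen/xor syndrome implementation selected at boot; the RAID-6 core
 * calls through this for P/Q generation.
 */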
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);

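/*
 * Candidate gen_syndrome() implementations, grouped by architecture.
 * raid6_select_algo() probes each entry whose ->valid() hook accepts the
 * running CPU and installs the winner in raid6_call.  The list is
 * NULL-terminated.
 */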
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
	&raid6_avx2x2,
	&raid6_avx2x1,
	&raid6_sse2x2,
	&raid6_sse2x1,
	&raid6_sse1x2,
	&raid6_sse1x1,
	&raid6_mmxx2,
	&raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x4,
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
	&raid6_avx2x4,
	&raid6_avx2x2,
	&raid6_avx2x1,
	&raid6_sse2x4,
	&raid6_sse2x2,
	&raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
	&raid6_vpermxor8,
	&raid6_vpermxor4,
	&raid6_vpermxor2,
	&raid6_vpermxor1,
	&raid6_altivec8,
	&raid6_altivec4,
	&raid6_altivec2,
	&raid6_altivec1,
#endif
#if defined(CONFIG_S390)
	&raid6_s390vx8,
#endif
#ifdef CONFIG_KERNEL_MODE_NEON
	&raid6_neonx8,
	&raid6_neonx4,
	&raid6_neonx2,
	&raid6_neonx1,
#endif
#if defined(__ia64__)
	&raid6_intx32,
	&raid6_intx16,
#endif
	&raid6_intx8,
	&raid6_intx4,
	&raid6_intx2,
	&raid6_intx1,
	NULL
};

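/*
 * Recovery entry points used by the RAID-6 core: raid6_2data_recov()
 * rebuilds two failed data blocks, raid6_datap_recov() rebuilds one data
 * block plus the P parity block.  Both are set by raid6_choose_recov().
 */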
void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);

void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);

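/* Candidate recovery implementations, NULL-terminated like raid6_algos[]. */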
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
#ifdef CONFIG_AS_AVX512
	&raid6_recov_avx512,
#endif
	&raid6_recov_avx2,
	&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
	&raid6_recov_s390xc,
#endif
#if defined(CONFIG_KERNEL_MODE_NEON)
	&raid6_recov_neon,
#endif
	&raid6_recov_intx1,
	NULL
};

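/*
 * Each benchmark pass below runs for 2^RAID6_TIME_JIFFIES_LG2 jiffies:
 * 16 ticks in the kernel, 512 ticks in the userspace test harness where
 * timing is noisier.
 */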
#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2	4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2	9
#define time_before(x, y) ((x) < (y))
#endif

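/*
 * The benchmark operates on RAID6_TEST_DISKS pages (six data pages plus
 * P and Q), so the scratch buffer is one order-3 (2^3 page) allocation.
 */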
#define RAID6_TEST_DISKS	8
#define RAID6_TEST_DISKS_ORDER	3

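/*
 * Pick the highest-priority recovery implementation whose ->valid() hook
 * (if any) reports that the required CPU features are present, and
 * install its handlers in raid6_2data_recov/raid6_datap_recov.
 */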
static inline const struct raid6_recov_calls *raid6_choose_recov(void)
{
	const struct raid6_recov_calls *const *algo;
	const struct raid6_recov_calls *best;

	for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
		if (!best || (*algo)->priority > best->priority)
			if (!(*algo)->valid || (*algo)->valid())
				best = *algo;

	if (best) {
		raid6_2data_recov = best->data2;
		raid6_datap_recov = best->datap;

		pr_info("raid6: using %s recovery algorithm\n", best->name);
	} else
		pr_err("raid6: Yikes! No recovery algorithm found!\n");

	return best;
}

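/*
 * Benchmark every usable gen_syndrome() implementation against the test
 * pages and make the fastest one the global raid6_call.  If
 * CONFIG_RAID6_PQ_BENCHMARK is disabled, the first usable entry is taken
 * instead.  The winner's xor_syndrome() helper, if present, is timed as
 * well so the RMW path can be reported.
 */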
static inline const struct raid6_calls *raid6_choose_gen(
	void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
{
	unsigned long perf, bestgenperf, j0, j1;
	int start = (disks>>1)-1, stop = disks-3;	/* work on the second half of the disks */
	const struct raid6_calls *const *algo;
	const struct raid6_calls *best;

	for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
		if (!best || (*algo)->priority >= best->priority) {
			if ((*algo)->valid && !(*algo)->valid())
				continue;

			if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
				best = *algo;
				break;
			}

			perf = 0;

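			/*
			 * Time this candidate with preemption disabled:
			 * wait for a fresh jiffies tick, then count
			 * gen_syndrome() calls completed in
			 * 2^RAID6_TIME_JIFFIES_LG2 ticks.
			 */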
			preempt_disable();
			j0 = jiffies;
			while ((j1 = jiffies) == j0)
				cpu_relax();
			while (time_before(jiffies,
					    j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
				perf++;
			}
			preempt_enable();

			if (perf > bestgenperf) {
				bestgenperf = perf;
				best = *algo;
			}
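			/*
			 * MB/s: perf calls each generate (disks-2) pages
			 * in (1 << RAID6_TIME_JIFFIES_LG2)/HZ seconds;
			 * dividing by 2^20 reduces to the shift below.
			 */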
			pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
				(perf * HZ * (disks-2)) >>
				(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
		}
	}

	if (!best) {
		pr_err("raid6: Yikes! No algorithm found!\n");
		goto out;
	}

	raid6_call = *best;

	if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
		pr_info("raid6: skipped pq benchmark and selected %s\n",
			best->name);
		goto out;
	}

	pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
		best->name,
		(bestgenperf * HZ * (disks - 2)) >>
		(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));

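	/*
	 * If the winner also provides xor_syndrome() (partial-stripe RMW),
	 * time it over the second half of the disks; the extra "+ 1" in
	 * the shift halves the reported figure since only about half of
	 * the data pages are touched per call.
	 */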
	if (best->xor_syndrome) {
		perf = 0;

		preempt_disable();
		j0 = jiffies;
		while ((j1 = jiffies) == j0)
			cpu_relax();
		while (time_before(jiffies,
				   j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
			best->xor_syndrome(disks, start, stop,
					   PAGE_SIZE, *dptrs);
			perf++;
		}
		preempt_enable();

		pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
			(perf * HZ * (disks - 2)) >>
			(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
	}

out:
	return best;
}


/* Try to pick the best algorithm */
/* This code uses the gfmul table as a convenient data set to abuse */

int __init raid6_select_algo(void)
{
	const int disks = RAID6_TEST_DISKS;

	const struct raid6_calls *gen_best;
	const struct raid6_recov_calls *rec_best;
	char *disk_ptr, *p;
	void *dptrs[RAID6_TEST_DISKS];
	int i, cycle;

	/* prepare the buffer and fill it circularly with gfmul table */
	disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
	if (!disk_ptr) {
		pr_err("raid6: Yikes! No memory available.\n");
		return -ENOMEM;
	}

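	/*
	 * One page per test disk: pages 0..disks-3 hold data, the last
	 * two receive the computed P and Q syndromes.
	 */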
	p = disk_ptr;
	for (i = 0; i < disks; i++)
		dptrs[i] = p + PAGE_SIZE * i;

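	/* raid6_gfmul is 256*256 = 64 KiB; tile it across the data pages. */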
	cycle = ((disks - 2) * PAGE_SIZE) / 65536;
	for (i = 0; i < cycle; i++) {
		memcpy(p, raid6_gfmul, 65536);
		p += 65536;
	}

	if ((disks - 2) * PAGE_SIZE % 65536)
		memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);

	/* select raid gen_syndrome function */
	gen_best = raid6_choose_gen(&dptrs, disks);

	/* select raid recover functions */
	rec_best = raid6_choose_recov();

	free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);

	return gen_best && rec_best ? 0 : -EINVAL;
}

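/* Nothing to tear down on module unload; an empty exit handler suffices. */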
static void raid6_exit(void)
{
	do { } while (0);
}

subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");