// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/algos.c
 *
 * Algorithm list and algorithm selection for RAID-6
 */

#include <linux/raid/pq.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/module.h>
#include <linux/gfp.h>
#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
#endif

struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
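
/*
 * Table of gen_syndrome() implementations.  Entries are compiled in only
 * for the matching architecture/config and are listed roughly from most
 * to least preferred; raid6_choose_gen() benchmarks the valid ones and
 * selects the fastest.  The table is NULL-terminated.
 */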
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
#ifdef CONFIG_AS_AVX2
	&raid6_avx2x2,
	&raid6_avx2x1,
#endif
	&raid6_sse2x2,
	&raid6_sse2x1,
	&raid6_sse1x2,
	&raid6_sse1x1,
	&raid6_mmxx2,
	&raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x4,
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
#ifdef CONFIG_AS_AVX2
	&raid6_avx2x4,
	&raid6_avx2x2,
	&raid6_avx2x1,
#endif
	&raid6_sse2x4,
	&raid6_sse2x2,
	&raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
	&raid6_vpermxor8,
	&raid6_vpermxor4,
	&raid6_vpermxor2,
	&raid6_vpermxor1,
	&raid6_altivec8,
	&raid6_altivec4,
	&raid6_altivec2,
	&raid6_altivec1,
#endif
#if defined(CONFIG_S390)
	&raid6_s390vx8,
#endif
#ifdef CONFIG_KERNEL_MODE_NEON
	&raid6_neonx8,
	&raid6_neonx4,
	&raid6_neonx2,
	&raid6_neonx1,
#endif
#if defined(__ia64__)
	&raid6_intx32,
	&raid6_intx16,
#endif
	&raid6_intx8,
	&raid6_intx4,
	&raid6_intx2,
	&raid6_intx1,
	NULL
};
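
/*
 * Recovery entry points, resolved at init time by raid6_choose_recov():
 * raid6_2data_recov() rebuilds two missing data blocks, raid6_datap_recov()
 * rebuilds one missing data block plus the P block.
 */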
void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);

void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);

const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_AS_AVX512
	&raid6_recov_avx512,
#endif
#ifdef CONFIG_AS_AVX2
	&raid6_recov_avx2,
#endif
#ifdef CONFIG_AS_SSSE3
	&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
	&raid6_recov_s390xc,
#endif
#if defined(CONFIG_KERNEL_MODE_NEON)
	&raid6_recov_neon,
#endif
	&raid6_recov_intx1,
	NULL
};

#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2	4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2	9
#define time_before(x, y) ((x) < (y))
#endif
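
/*
 * Pick a recovery implementation: walk raid6_recov_algos, keep the entry
 * with the highest ->priority whose ->valid() check (if any) passes, and
 * install its handlers in raid6_2data_recov/raid6_datap_recov.
 */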
static inline const struct raid6_recov_calls *raid6_choose_recov(void)
{
	const struct raid6_recov_calls *const *algo;
	const struct raid6_recov_calls *best;

	for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
		if (!best || (*algo)->priority > best->priority)
			if (!(*algo)->valid || (*algo)->valid())
				best = *algo;

	if (best) {
		raid6_2data_recov = best->data2;
		raid6_datap_recov = best->datap;

		pr_info("raid6: using %s recovery algorithm\n", best->name);
	} else
		pr_err("raid6: Yikes! No recovery algorithm found!\n");

	return best;
}
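
/*
 * Pick a gen_syndrome() implementation: benchmark each valid candidate
 * whose preference is at least that of the current best, and keep the
 * fastest.  When CONFIG_RAID6_PQ_BENCHMARK is disabled, the first valid
 * candidate is used without benchmarking.
 */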
static inline const struct raid6_calls *raid6_choose_gen(
	void *(*const dptrs)[(65536/PAGE_SIZE)+2], const int disks)
{
	unsigned long perf, bestgenperf, bestxorperf, j0, j1;
	int start = (disks>>1)-1, stop = disks-3;	/* work on the second half of the disks */
	const struct raid6_calls *const *algo;
	const struct raid6_calls *best;

	for (bestgenperf = 0, bestxorperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
		if (!best || (*algo)->prefer >= best->prefer) {
			if ((*algo)->valid && !(*algo)->valid())
				continue;

			if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
				best = *algo;
				break;
			}
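
			/*
			 * Benchmark gen_syndrome(): wait for a jiffies edge,
			 * then count full-stripe syndrome generations over a
			 * window of (1 << RAID6_TIME_JIFFIES_LG2) jiffies
			 * with preemption disabled.
			 */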
			perf = 0;

			preempt_disable();
			j0 = jiffies;
			while ((j1 = jiffies) == j0)
				cpu_relax();
			while (time_before(jiffies,
					    j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
				perf++;
			}
			preempt_enable();

			if (perf > bestgenperf) {
				bestgenperf = perf;
				best = *algo;
			}
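
			/*
			 * Each call processes disks-2 data pages = 64 KiB
			 * (2^16 bytes) in 2^LG2/HZ seconds, so
			 * MB/s = perf * 2^16 * HZ / 2^LG2 / 2^20
			 *      = (perf*HZ) >> (20 - 16 + LG2).
			 */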
			pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
				(perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));

			if (!(*algo)->xor_syndrome)
				continue;
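
			/*
			 * Benchmark xor_syndrome() the same way.  The RMW
			 * path only touches the second half of the stripe
			 * (start..stop), roughly 32 KiB per call, hence the
			 * extra ">> 1" in the MB/s conversion below.
			 */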
			perf = 0;

			preempt_disable();
			j0 = jiffies;
			while ((j1 = jiffies) == j0)
				cpu_relax();
			while (time_before(jiffies,
					    j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->xor_syndrome(disks, start, stop,
						      PAGE_SIZE, *dptrs);
				perf++;
			}
			preempt_enable();

			if (best == *algo)
				bestxorperf = perf;

			pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
				(perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
		}
	}

	if (best) {
		pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
			best->name,
			(bestgenperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
		if (best->xor_syndrome)
			pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
				(bestxorperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
		raid6_call = *best;
	} else
		pr_err("raid6: Yikes! No algorithm found!\n");

	return best;
}

/* Try to pick the best algorithm */
/* This code uses the gfmul table as a convenient data set to abuse */
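
/*
 * raid6_select_algo() is the library's init entry point.  It builds a
 * dummy stripe: the first disks-2 "data" pages point into the constant
 * raid6_gfmul tables (64 KiB), and two freshly allocated pages receive
 * the P and Q syndromes.  It then benchmarks and installs the best
 * generation and recovery routines.
 */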
int __init raid6_select_algo(void)
{
	const int disks = (65536/PAGE_SIZE)+2;

	const struct raid6_calls *gen_best;
	const struct raid6_recov_calls *rec_best;
	char *syndromes;
	void *dptrs[(65536/PAGE_SIZE)+2];
	int i;
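
	/* Point the data "disks" at successive pages of the gfmul table */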
	for (i = 0; i < disks-2; i++)
		dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;

	/* Normal code - use a 2-page allocation to avoid D$ conflict */
	syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);

	if (!syndromes) {
		pr_err("raid6: Yikes! No memory available.\n");
		return -ENOMEM;
	}

	dptrs[disks-2] = syndromes;
	dptrs[disks-1] = syndromes + PAGE_SIZE;

	/* select raid gen_syndrome function */
	gen_best = raid6_choose_gen(&dptrs, disks);

	/* select raid recover functions */
	rec_best = raid6_choose_recov();

	free_pages((unsigned long)syndromes, 1);

	return gen_best && rec_best ? 0 : -EINVAL;
}
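
/*
 * Nothing to tear down; an (empty) exit handler is provided so the code
 * can also be unloaded cleanly when built as a module.
 */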
static void raid6_exit(void)
{
	do { } while (0);
}

subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");