// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/algos.c
 *
 * Algorithm list and algorithm selection for RAID-6
 */

#include <linux/raid/pq.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/module.h>
#include <linux/gfp.h>
#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
#endif

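/*
 * raid6_call is filled in by raid6_select_algo() with the fastest usable
 * implementation for this machine.  Callers then compute the P/Q
 * syndrome with, e.g.:
 *
 *	raid6_call.gen_syndrome(disks, PAGE_SIZE, ptrs);
 *
 * where ptrs[] holds disks-2 data pages followed by the P and Q pages.
 */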
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);

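/*
 * Candidate implementations, listed roughly in order of preference
 * within each architecture block.  Each struct raid6_calls entry (see
 * <linux/raid/pq.h>) supplies gen_syndrome()/xor_syndrome() routines,
 * a name, an optional valid() runtime check, and a prefer weight
 * consulted during selection.
 */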
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
#ifdef CONFIG_AS_AVX2
	&raid6_avx2x2,
	&raid6_avx2x1,
#endif
	&raid6_sse2x2,
	&raid6_sse2x1,
	&raid6_sse1x2,
	&raid6_sse1x1,
	&raid6_mmxx2,
	&raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x4,
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
#ifdef CONFIG_AS_AVX2
	&raid6_avx2x4,
	&raid6_avx2x2,
	&raid6_avx2x1,
#endif
	&raid6_sse2x4,
	&raid6_sse2x2,
	&raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
	&raid6_vpermxor8,
	&raid6_vpermxor4,
	&raid6_vpermxor2,
	&raid6_vpermxor1,
	&raid6_altivec8,
	&raid6_altivec4,
	&raid6_altivec2,
	&raid6_altivec1,
#endif
#if defined(CONFIG_S390)
	&raid6_s390vx8,
#endif
#ifdef CONFIG_KERNEL_MODE_NEON
	&raid6_neonx8,
	&raid6_neonx4,
	&raid6_neonx2,
	&raid6_neonx1,
#endif
#if defined(__ia64__)
	&raid6_intx32,
	&raid6_intx16,
#endif
	&raid6_intx8,
	&raid6_intx4,
	&raid6_intx2,
	&raid6_intx1,
	NULL
};

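/*
 * Recovery entry points, bound by raid6_choose_recov():
 * raid6_2data_recov(disks, bytes, faila, failb, ptrs) rebuilds two
 * data blocks; raid6_datap_recov(disks, bytes, faila, ptrs) rebuilds
 * one data block plus P.
 */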
void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);

void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);

const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_AS_AVX512
	&raid6_recov_avx512,
#endif
#ifdef CONFIG_AS_AVX2
	&raid6_recov_avx2,
#endif
#ifdef CONFIG_AS_SSSE3
	&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
	&raid6_recov_s390xc,
#endif
#if defined(CONFIG_KERNEL_MODE_NEON)
	&raid6_recov_neon,
#endif
	&raid6_recov_intx1,
	NULL
};

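/*
 * Benchmark window: each candidate runs for 2^RAID6_TIME_JIFFIES_LG2
 * jiffies (16 in the kernel, 512 in the user-space test harness).
 */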
#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2	4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2	9
#define time_before(x, y) ((x) < (y))
#endif

static inline const struct raid6_recov_calls *raid6_choose_recov(void)
{
	const struct raid6_recov_calls *const *algo;
	const struct raid6_recov_calls *best;

	for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
		if (!best || (*algo)->priority > best->priority)
			if (!(*algo)->valid || (*algo)->valid())
				best = *algo;

	if (best) {
		raid6_2data_recov = best->data2;
		raid6_datap_recov = best->datap;

		pr_info("raid6: using %s recovery algorithm\n", best->name);
	} else
		pr_err("raid6: Yikes! No recovery algorithm found!\n");

	return best;
}

static inline const struct raid6_calls *raid6_choose_gen(
	void *(*const dptrs)[(65536/PAGE_SIZE)+2], const int disks)
{
	unsigned long perf, bestgenperf, bestxorperf, j0, j1;
	int start = (disks>>1)-1, stop = disks-3;	/* work on the second half of the disks */
	const struct raid6_calls *const *algo;
	const struct raid6_calls *best;

	for (bestgenperf = 0, bestxorperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
		if (!best || (*algo)->prefer >= best->prefer) {
			if ((*algo)->valid && !(*algo)->valid())
				continue;

			if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
				best = *algo;
				break;
			}

			perf = 0;

			preempt_disable();
			j0 = jiffies;
			while ((j1 = jiffies) == j0)
				cpu_relax();
			while (time_before(jiffies,
					   j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
				perf++;
			}
			preempt_enable();

			if (perf > bestgenperf) {
				bestgenperf = perf;
				best = *algo;
			}
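			/*
			 * Each gen_syndrome() call works over disks-2
			 * pages = 64 KiB (2^16 bytes) of data in a window
			 * of 2^RAID6_TIME_JIFFIES_LG2 jiffies, so MB/s is
			 * perf * 2^16 * HZ / 2^RAID6_TIME_JIFFIES_LG2,
			 * scaled down by 2^20 - hence the shift below.
			 */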
			pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
				(perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));

			if (!(*algo)->xor_syndrome)
				continue;

			perf = 0;

			preempt_disable();
			j0 = jiffies;
			while ((j1 = jiffies) == j0)
				cpu_relax();
			while (time_before(jiffies,
					   j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->xor_syndrome(disks, start, stop,
						      PAGE_SIZE, *dptrs);
				perf++;
			}
			preempt_enable();

			if (best == *algo)
				bestxorperf = perf;

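			/*
			 * xor_syndrome() above covers only about half of
			 * the data disks (start to stop), so each call
			 * moves half as many bytes - hence the extra +1
			 * in the shift below.
			 */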
			pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
				(perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
		}
	}

	if (best) {
		pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
			best->name,
			(bestgenperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
		if (best->xor_syndrome)
			pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
				(bestxorperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
		raid6_call = *best;
	} else
		pr_err("raid6: Yikes! No algorithm found!\n");

	return best;
}


/*
 * Try to pick the best algorithm.  This code uses the gfmul table as
 * a convenient data set to abuse.
 */

int __init raid6_select_algo(void)
{
	const int disks = (65536/PAGE_SIZE)+2;	/* 64 KiB of data plus P and Q */

	const struct raid6_calls *gen_best;
	const struct raid6_recov_calls *rec_best;
	char *syndromes;
	void *dptrs[(65536/PAGE_SIZE)+2];
	int i;

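	/*
	 * The raid6_gfmul table provides 64 KiB of pre-initialized,
	 * read-only data to stand in for the data disks.
	 */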
	for (i = 0; i < disks-2; i++)
		dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;

	/* Normal code - use a 2-page allocation to avoid D$ conflict */
	syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);

	if (!syndromes) {
		pr_err("raid6: Yikes! No memory available.\n");
		return -ENOMEM;
	}

	dptrs[disks-2] = syndromes;
	dptrs[disks-1] = syndromes + PAGE_SIZE;

	/* select raid gen_syndrome function */
	gen_best = raid6_choose_gen(&dptrs, disks);

	/* select raid recover functions */
	rec_best = raid6_choose_recov();

	free_pages((unsigned long)syndromes, 1);

	return gen_best && rec_best ? 0 : -EINVAL;
}

static void raid6_exit(void)
{
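	/* Nothing to clean up, but module unload needs an exit handler. */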
	do { } while (0);
}

subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");