Thomas Gleixner | dd165a6 | 2019-05-20 19:08:13 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* -*- linux-c -*- ------------------------------------------------------- * |
| 3 | * |
| 4 | * Copyright 2002 H. Peter Anvin - All Rights Reserved |
| 5 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6 | * ----------------------------------------------------------------------- */ |
| 7 | |
| 8 | /* |
NeilBrown | a8e026c | 2010-08-12 06:44:54 +1000 | [diff] [blame] | 9 | * raid6/algos.c |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | * |
| 11 | * Algorithm list and algorithm selection for RAID-6 |
| 12 | */ |
| 13 | |
Dan Williams | f701d58 | 2009-03-31 15:09:39 +1100 | [diff] [blame] | 14 | #include <linux/raid/pq.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | #ifndef __KERNEL__ |
| 16 | #include <sys/mman.h> |
H. Peter Anvin | d7e70ba | 2005-09-16 19:27:29 -0700 | [diff] [blame] | 17 | #include <stdio.h> |
Dan Williams | f701d58 | 2009-03-31 15:09:39 +1100 | [diff] [blame] | 18 | #else |
Jim Kukunas | f674ef7 | 2012-05-22 13:54:16 +1000 | [diff] [blame] | 19 | #include <linux/module.h> |
NeilBrown | d5302fe | 2010-08-12 06:38:24 +1000 | [diff] [blame] | 20 | #include <linux/gfp.h> |
Dan Williams | f701d58 | 2009-03-31 15:09:39 +1100 | [diff] [blame] | 21 | #if !RAID6_USE_EMPTY_ZERO_PAGE |
| 22 | /* In .bss so it's zeroed */ |
| 23 | const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); |
| 24 | EXPORT_SYMBOL(raid6_empty_zero_page); |
| 25 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | #endif |
| 27 | |
/*
 * The gen/xor syndrome implementation selected at init time; filled in
 * by raid6_choose_gen() with a copy of the winning raid6_calls entry.
 */
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 30 | |
/*
 * Candidate gen_syndrome() implementations, grouped per architecture.
 * raid6_choose_gen() walks this NULL-terminated table, benchmarking each
 * valid entry (or taking the first valid one when benchmarking is
 * disabled), so within a group the presumably-faster variants are listed
 * first.
 */
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
	&raid6_avx2x2,
	&raid6_avx2x1,
	&raid6_sse2x2,
	&raid6_sse2x1,
	&raid6_sse1x2,
	&raid6_sse1x1,
	&raid6_mmxx2,
	&raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x4,
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
	&raid6_avx2x4,
	&raid6_avx2x2,
	&raid6_avx2x1,
	&raid6_sse2x4,
	&raid6_sse2x2,
	&raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
	&raid6_vpermxor8,
	&raid6_vpermxor4,
	&raid6_vpermxor2,
	&raid6_vpermxor1,
	&raid6_altivec8,
	&raid6_altivec4,
	&raid6_altivec2,
	&raid6_altivec1,
#endif
#if defined(CONFIG_S390)
	&raid6_s390vx8,
#endif
#ifdef CONFIG_KERNEL_MODE_NEON
	&raid6_neonx8,
	&raid6_neonx4,
	&raid6_neonx2,
	&raid6_neonx1,
#endif
#if defined(__ia64__)
	&raid6_intx32,
	&raid6_intx16,
#endif
	/* Portable C unrolled fallbacks, always available */
	&raid6_intx8,
	&raid6_intx4,
	&raid6_intx2,
	&raid6_intx1,
	NULL		/* sentinel: terminates the scan in raid6_choose_gen() */
};
| 88 | |
/*
 * Recovery entry points used by RAID-6 consumers; bound to the winning
 * raid6_recov_calls entry by raid6_choose_recov() at init time.
 */

/* Recover two data blocks: (disks, bytes, faila, failb, ptrs) */
void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);

/* Recover one data block plus P: (disks, bytes, faila, ptrs) */
void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);

/*
 * Candidate recovery implementations; raid6_choose_recov() picks the
 * highest-priority entry whose ->valid() check (if any) passes.
 * NULL-terminated.
 */
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
#ifdef CONFIG_AS_AVX512
	&raid6_recov_avx512,
#endif
	&raid6_recov_avx2,
	&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
	&raid6_recov_s390xc,
#endif
#if defined(CONFIG_KERNEL_MODE_NEON)
	&raid6_recov_neon,
#endif
	/* Portable C fallback, always valid */
	&raid6_recov_intx1,
	NULL
};
| 112 | |
/*
 * log2 of the benchmark window length, in jiffies: each candidate
 * algorithm is timed for 2^RAID6_TIME_JIFFIES_LG2 ticks.
 */
#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2	4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2	9
#define time_before(x, y) ((x) < (y))
#endif

/*
 * Geometry of the synthetic benchmark array: 8 disks of one page each,
 * allocated as a single 2^3-page block in raid6_select_algo() — keep
 * RAID6_TEST_DISKS_ORDER equal to log2(RAID6_TEST_DISKS).
 */
#define RAID6_TEST_DISKS	8
#define RAID6_TEST_DISKS_ORDER	3
| 123 | |
Jim Kukunas | 96e6770 | 2012-05-22 13:54:24 +1000 | [diff] [blame] | 124 | static inline const struct raid6_recov_calls *raid6_choose_recov(void) |
Jim Kukunas | 048a8b8 | 2012-05-22 13:54:18 +1000 | [diff] [blame] | 125 | { |
| 126 | const struct raid6_recov_calls *const *algo; |
| 127 | const struct raid6_recov_calls *best; |
| 128 | |
| 129 | for (best = NULL, algo = raid6_recov_algos; *algo; algo++) |
| 130 | if (!best || (*algo)->priority > best->priority) |
| 131 | if (!(*algo)->valid || (*algo)->valid()) |
| 132 | best = *algo; |
| 133 | |
| 134 | if (best) { |
| 135 | raid6_2data_recov = best->data2; |
| 136 | raid6_datap_recov = best->datap; |
| 137 | |
Anton Blanchard | b395f75 | 2014-10-13 23:03:16 +1100 | [diff] [blame] | 138 | pr_info("raid6: using %s recovery algorithm\n", best->name); |
Jim Kukunas | 048a8b8 | 2012-05-22 13:54:18 +1000 | [diff] [blame] | 139 | } else |
Anton Blanchard | b395f75 | 2014-10-13 23:03:16 +1100 | [diff] [blame] | 140 | pr_err("raid6: Yikes! No recovery algorithm found!\n"); |
Jim Kukunas | 96e6770 | 2012-05-22 13:54:24 +1000 | [diff] [blame] | 141 | |
| 142 | return best; |
Jim Kukunas | 048a8b8 | 2012-05-22 13:54:18 +1000 | [diff] [blame] | 143 | } |
| 144 | |
/*
 * Benchmark every valid gen_syndrome() candidate and select the fastest,
 * copying the winner into the global raid6_call.  When
 * CONFIG_RAID6_PQ_BENCHMARK is disabled, the first valid entry in
 * raid6_algos[] is taken without timing.  If the winner also provides an
 * xor_syndrome() (RMW path), that is benchmarked and reported too.
 *
 * @dptrs: pointer to the array of RAID6_TEST_DISKS one-page disk buffers
 *         prepared by raid6_select_algo()
 * @disks: number of disks in the test array (data disks + P + Q)
 *
 * Returns the chosen raid6_calls entry, or NULL if none was usable.
 */
static inline const struct raid6_calls *raid6_choose_gen(
	void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
{
	unsigned long perf, bestgenperf, j0, j1;
	int start = (disks>>1)-1, stop = disks-3;	/* work on the second half of the disks */
	const struct raid6_calls *const *algo;
	const struct raid6_calls *best;

	for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
		/*
		 * ">=" (not ">") so a later candidate of equal priority still
		 * gets benchmarked and can win on measured speed.
		 */
		if (!best || (*algo)->priority >= best->priority) {
			if ((*algo)->valid && !(*algo)->valid())
				continue;

			/* Benchmarking disabled: take the first valid entry. */
			if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
				best = *algo;
				break;
			}

			perf = 0;

			preempt_disable();
			/* Spin to a jiffies edge so the window starts on a tick. */
			j0 = jiffies;
			while ((j1 = jiffies) == j0)
				cpu_relax();
			/* Count syndrome computations in a 2^LG2-jiffy window. */
			while (time_before(jiffies,
					    j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
				perf++;
			}
			preempt_enable();

			if (perf > bestgenperf) {
				bestgenperf = perf;
				best = *algo;
			}
			/*
			 * MB/s = perf pages * (disks-2) data pages each, per
			 * 2^LG2 jiffies; the shift folds in PAGE_SHIFT (bytes),
			 * >>20 (MiB) and the window length.
			 */
			pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
				(perf * HZ * (disks-2)) >>
				(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
		}
	}

	if (!best) {
		pr_err("raid6: Yikes! No algorithm found!\n");
		goto out;
	}

	raid6_call = *best;

	if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
		pr_info("raid6: skipped pq benchmark and selected %s\n",
			best->name);
		goto out;
	}

	pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
		best->name,
		(bestgenperf * HZ * (disks - 2)) >>
		(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));

	/* Benchmark the partial (RMW) syndrome path, if provided. */
	if (best->xor_syndrome) {
		perf = 0;

		preempt_disable();
		j0 = jiffies;
		while ((j1 = jiffies) == j0)
			cpu_relax();
		while (time_before(jiffies,
				   j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
			best->xor_syndrome(disks, start, stop,
					   PAGE_SIZE, *dptrs);
			perf++;
		}
		preempt_enable();

		/*
		 * Extra ">> 1" in the scale factor: the xor pass only touches
		 * the start..stop half of the disks — presumably to report a
		 * comparable whole-stripe rate; confirm against the original
		 * commit if this matters.
		 */
		pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
			(perf * HZ * (disks - 2)) >>
			(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
	}

out:
	return best;
}
| 227 | |
| 228 | |
| 229 | /* Try to pick the best algorithm */ |
| 230 | /* This code uses the gfmul table as convenient data set to abuse */ |
| 231 | |
| 232 | int __init raid6_select_algo(void) |
| 233 | { |
Zhengyuan Liu | f591df3 | 2019-12-20 10:21:28 +0800 | [diff] [blame] | 234 | const int disks = RAID6_TEST_DISKS; |
Jim Kukunas | 96e6770 | 2012-05-22 13:54:24 +1000 | [diff] [blame] | 235 | |
| 236 | const struct raid6_calls *gen_best; |
| 237 | const struct raid6_recov_calls *rec_best; |
Zhengyuan Liu | f591df3 | 2019-12-20 10:21:28 +0800 | [diff] [blame] | 238 | char *disk_ptr, *p; |
| 239 | void *dptrs[RAID6_TEST_DISKS]; |
| 240 | int i, cycle; |
Jim Kukunas | 96e6770 | 2012-05-22 13:54:24 +1000 | [diff] [blame] | 241 | |
Zhengyuan Liu | f591df3 | 2019-12-20 10:21:28 +0800 | [diff] [blame] | 242 | /* prepare the buffer and fill it circularly with gfmul table */ |
| 243 | disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER); |
| 244 | if (!disk_ptr) { |
Anton Blanchard | b395f75 | 2014-10-13 23:03:16 +1100 | [diff] [blame] | 245 | pr_err("raid6: Yikes! No memory available.\n"); |
Jim Kukunas | 96e6770 | 2012-05-22 13:54:24 +1000 | [diff] [blame] | 246 | return -ENOMEM; |
| 247 | } |
| 248 | |
Zhengyuan Liu | f591df3 | 2019-12-20 10:21:28 +0800 | [diff] [blame] | 249 | p = disk_ptr; |
| 250 | for (i = 0; i < disks; i++) |
| 251 | dptrs[i] = p + PAGE_SIZE * i; |
| 252 | |
| 253 | cycle = ((disks - 2) * PAGE_SIZE) / 65536; |
| 254 | for (i = 0; i < cycle; i++) { |
| 255 | memcpy(p, raid6_gfmul, 65536); |
| 256 | p += 65536; |
| 257 | } |
| 258 | |
| 259 | if ((disks - 2) * PAGE_SIZE % 65536) |
| 260 | memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536); |
Jim Kukunas | 96e6770 | 2012-05-22 13:54:24 +1000 | [diff] [blame] | 261 | |
| 262 | /* select raid gen_syndrome function */ |
| 263 | gen_best = raid6_choose_gen(&dptrs, disks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 264 | |
Jim Kukunas | 048a8b8 | 2012-05-22 13:54:18 +1000 | [diff] [blame] | 265 | /* select raid recover functions */ |
Jim Kukunas | 96e6770 | 2012-05-22 13:54:24 +1000 | [diff] [blame] | 266 | rec_best = raid6_choose_recov(); |
Jim Kukunas | 048a8b8 | 2012-05-22 13:54:18 +1000 | [diff] [blame] | 267 | |
Zhengyuan Liu | f591df3 | 2019-12-20 10:21:28 +0800 | [diff] [blame] | 268 | free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER); |
Jim Kukunas | 96e6770 | 2012-05-22 13:54:24 +1000 | [diff] [blame] | 269 | |
| 270 | return gen_best && rec_best ? 0 : -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 271 | } |
Dan Williams | f701d58 | 2009-03-31 15:09:39 +1100 | [diff] [blame] | 272 | |
| 273 | static void raid6_exit(void) |
| 274 | { |
| 275 | do { } while (0); |
| 276 | } |
| 277 | |
| 278 | subsys_initcall(raid6_select_algo); |
| 279 | module_exit(raid6_exit); |
| 280 | MODULE_LICENSE("GPL"); |
NeilBrown | 0efb9e6 | 2009-12-14 12:49:58 +1100 | [diff] [blame] | 281 | MODULE_DESCRIPTION("RAID6 Q-syndrome calculations"); |