// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 * Information.
 */

#include <linux/t10-pi.h>
#include <linux/blk-integrity.h>
#include <linux/crc-t10dif.h>
#include <linux/module.h>
#include <net/checksum.h>

/* Pluggable 16-bit checksum used to fill in the PI guard tag. */
typedef __be16 (csum_fn) (void *, unsigned int);

static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}

static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
	return (__force __be16)ip_compute_csum(data, len);
}
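
/*
 * For reference, the 8-byte PI tuple these helpers operate on is declared
 * in <linux/t10-pi.h> and looks like this:
 *
 *	struct t10_pi_tuple {
 *		__be16 guard_tag;	16-bit checksum of the data interval
 *		__be16 app_tag;		application tag, opaque to this code
 *		__be32 ref_tag;		lower 32 bits of the target sector
 *	};
 */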

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;

		pi->guard_tag = fn(iter->data_buf, iter->interval);
		pi->app_tag = 0;

		if (type == T10_PI_TYPE1_PROTECTION)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += sizeof(struct t10_pi_tuple);
		iter->seed++;
	}

	return BLK_STS_OK;
}
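
/*
 * The spec reserves escape values that tell the verifier to skip checking
 * an interval: an all-ones app tag (T10_PI_APP_ESCAPE) for Type 1/2, and
 * all-ones app and ref tags together for Type 3.
 */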
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	unsigned int i;

	BUG_ON(type == T10_PI_TYPE0_PROTECTION);

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;
		__be16 csum;

		if (type == T10_PI_TYPE1_PROTECTION ||
		    type == T10_PI_TYPE2_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
				       iter->disk_name,
				       (unsigned long long)iter->seed,
				       be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else if (type == T10_PI_TYPE3_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		csum = fn(iter->data_buf, iter->interval);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
			       iter->disk_name,
			       (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += sizeof(struct t10_pi_tuple);
		iter->seed++;
	}

	return BLK_STS_OK;
}

static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
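
/*
 * Note: the IP checksum flavors exist for Data Integrity Extensions (DIX)
 * style setups, where the host computes the cheaper internet checksum as
 * the guard tag and the HBA is expected to convert it to the T10 CRC
 * before it goes out on the wire.
 */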

/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq: request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Remap protection information to match the
 * physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}
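
/*
 * Worked example (hypothetical numbers): a bio written to virtual sector 0
 * of a partition that starts at physical LBA 2048 carries tuples generated
 * with ref_tags 0, 1, 2, ...; the loop above rewrites them to 2048, 2049,
 * 2050, ... so they match the actual LBAs the device will check against.
 */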

/**
 * t10_pi_type1_complete - prepare PI prior to returning request to the block layer
 * @rq: request with PI that should be prepared
 * @nr_bytes: total bytes that completed and need remapping
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to virtual values expected by the
 * block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			/* Only remap the part of the request that completed. */
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}
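
/*
 * The intervals math above: with 512-byte protection intervals,
 * interval_exp is 9, so e.g. nr_bytes == 4096 yields 4096 >> 9 == 8
 * tuples to remap, which caps the loop on partial completions.
 */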

static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}

const struct blk_integrity_profile t10_pi_type1_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= t10_pi_type1_generate_crc,
	.verify_fn		= t10_pi_type1_verify_crc,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= t10_pi_type1_generate_ip,
	.verify_fn		= t10_pi_type1_verify_ip,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= t10_pi_type3_generate_crc,
	.verify_fn		= t10_pi_type3_verify_crc,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= t10_pi_type3_generate_ip,
	.verify_fn		= t10_pi_type3_verify_ip,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
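
/*
 * A minimal sketch of how a low-level driver might wire up one of these
 * profiles (the "disk" variable is hypothetical; blk_integrity_register()
 * and struct blk_integrity are the real block layer interfaces):
 *
 *	struct blk_integrity bi = {
 *		.profile      = &t10_pi_type1_crc,
 *		.tuple_size   = sizeof(struct t10_pi_tuple),
 *		.interval_exp = ilog2(queue_logical_block_size(disk->queue)),
 *	};
 *
 *	blk_integrity_register(disk, &bi);
 */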

MODULE_LICENSE("GPL");