// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 * Information.
 */

#include <linux/t10-pi.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/module.h>
#include <net/checksum.h>

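/* Common signature for the 16-bit guard tag checksum helpers below. */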
typedef __be16 (csum_fn) (void *, unsigned int);

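/* CRC16 guard tag using the T10 DIF polynomial, as mandated by the T10 spec. */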
static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}

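/* IP checksum guard tag - a cheaper alternative used by DIX-capable setups. */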
static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
	return (__force __be16)ip_compute_csum(data, len);
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	unsigned int i;

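	/* Emit one PI tuple per protection interval; seed is the first ref tag. */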
	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;

		pi->guard_tag = fn(iter->data_buf, iter->interval);
		pi->app_tag = 0;

		if (type == T10_PI_TYPE1_PROTECTION)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += sizeof(struct t10_pi_tuple);
		iter->seed++;
	}

	return BLK_STS_OK;
}

static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	unsigned int i;

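	/* Type 0 means the device carries no protection, so verify must never run. */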
	BUG_ON(type == T10_PI_TYPE0_PROTECTION);

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;
		__be16 csum;

		if (type == T10_PI_TYPE1_PROTECTION ||
		    type == T10_PI_TYPE2_PROTECTION) {
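			/*
			 * An app tag of all ones is the T10 escape value:
			 * checking of this tuple is disabled.
			 */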
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
				       iter->disk_name,
				       (unsigned long long)iter->seed,
				       be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else if (type == T10_PI_TYPE3_PROTECTION) {
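			/*
			 * Type 3 has no ref tag check; both tags must carry
			 * escape values for the guard check to be skipped.
			 */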
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		csum = fn(iter->data_buf, iter->interval);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
			       iter->disk_name, (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += sizeof(struct t10_pi_tuple);
		iter->seed++;
	}

	return BLK_STS_OK;
}

static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq: request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer and is what the ref_tag is based
 * on. Due to partitioning, MD/DM cloning, etc. the actual physical start
 * sector is likely to be different. Remap the protection information to
 * match the physical LBA: for example, a bio targeting virtual sector 0 of
 * a partition that starts at LBA 2048 has its ref tags rewritten from
 * 0, 1, 2, ... to 2048, 2049, 2050, ...
 */
static void t10_pi_type1_prepare(struct request *rq)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			void *p, *pmap;
			unsigned int j;

			pmap = kmap_atomic(iv.bv_page);
			p = pmap + iv.bv_offset;
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

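				/*
				 * Only remap tuples that still carry the
				 * expected virtual ref tag; escape values and
				 * mismatches are left untouched.
				 */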
				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}

			kunmap_atomic(pmap);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

/**
 * t10_pi_type1_complete - prepare PI prior to returning request to the block layer
 * @rq: request with PI that should be prepared
 * @nr_bytes: number of data bytes that have completed and whose PI needs remapping
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer and is what the ref_tag is based
 * on. Due to partitioning, MD/DM cloning, etc. the actual physical start
 * sector is likely to be different. Since the physical start sector was
 * submitted to the device, remap the protection information back to the
 * virtual values expected by the block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

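	/*
	 * Only nr_bytes worth of data has completed, so only that many
	 * protection intervals need their ref tags remapped back.
	 */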
	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			void *p, *pmap;
			unsigned int j;

			pmap = kmap_atomic(iv.bv_page);
			p = pmap + iv.bv_offset;
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

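				/* Undo the prepare-time remap for tuples carrying the physical ref tag. */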
				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}

			kunmap_atomic(pmap);
		}
	}
}

static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}

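/*
 * Integrity profiles exported for use by drivers. A minimal registration
 * sketch (field names per struct blk_integrity in this kernel; "disk" is
 * the driver's gendisk - see e.g. drivers/scsi/sd_dif.c for the
 * authoritative setup):
 *
 *	struct blk_integrity bi = {
 *		.profile	= &t10_pi_type1_crc,
 *		.tuple_size	= sizeof(struct t10_pi_tuple),
 *		.tag_size	= 0,
 *		.interval_exp	= ilog2(queue_logical_block_size(disk->queue)),
 *	};
 *	blk_integrity_register(disk, &bi);
 */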
const struct blk_integrity_profile t10_pi_type1_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= t10_pi_type1_generate_crc,
	.verify_fn		= t10_pi_type1_verify_crc,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= t10_pi_type1_generate_ip,
	.verify_fn		= t10_pi_type1_verify_ip,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= t10_pi_type3_generate_crc,
	.verify_fn		= t10_pi_type3_verify_crc,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= t10_pi_type3_generate_ip,
	.verify_fn		= t10_pi_type3_verify_ip,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);

MODULE_LICENSE("GPL");