block: move dif_prepare/dif_complete functions to block layer

Currently these functions are implemented in the SCSI layer, but they
really belong in the block layer, since T10-PI is a generic data
integrity feature that is used by the NVMe protocol as well. Also, use
the tuple size from the integrity profile, since it may vary between
integrity types.

Suggested-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Max Gurtovoy
2018-07-30 00:15:32 +03:00
committed by Jens Axboe
parent ddd0bc7569
commit 10c41ddd61
5 changed files with 118 additions and 125 deletions
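
The hunk below shows only the SCSI-side removal; the block-layer additions live in the other changed files and are not part of this hunk. As a rough sketch of how the sd driver call sites might change (the helper names and signatures here are inferred from the commit message, not confirmed by the diff shown):

	/* In sd_setup_read_write_cmnd(): was sd_dif_prepare(SCpnt).
	 * t10_pi_prepare() and its parameters are assumptions. */
	t10_pi_prepare(SCpnt->request, sdkp->protection_type);

	/* In sd_done(): was sd_dif_complete(SCpnt, good_bytes). */
	t10_pi_complete(SCpnt->request, sdkp->protection_type,
			good_bytes / scsi_prot_interval(SCpnt));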

@@ -95,116 +95,3 @@ out:
blk_integrity_register(disk, &bi);
}

/*
* The virtual start sector is the one that was originally submitted
* by the block layer. Due to partitioning, MD/DM cloning, etc. the
* actual physical start sector is likely to be different. Remap
* protection information to match the physical LBA.
*
* From a protocol perspective there's a slight difference between
* Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
* reference tag is seeded in the CDB. This gives us the potential to
* avoid virt->phys remapping during write. However, at read time we
 * don't know whether the virt sector is the same as when we wrote it
 * (we could be reading from the real disk as opposed to an MD/DM
 * device). So we always remap Type 2, making it identical to Type 1.
*
* Type 3 does not have a reference tag so no remapping is required.
*/
void sd_dif_prepare(struct scsi_cmnd *scmd)
{
	const int tuple_sz = sizeof(struct t10_pi_tuple);
	struct bio *bio;
	struct scsi_disk *sdkp;
	struct t10_pi_tuple *pi;
	u32 phys, virt;

	sdkp = scsi_disk(scmd->request->rq_disk);

	if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION)
		return;

	phys = t10_pi_ref_tag(scmd->request);

	__rq_for_each_bio(bio, scmd->request) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		struct bio_vec iv;
		struct bvec_iter iter;
		unsigned int j;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		virt = bip_get_seed(bip) & 0xffffffff;

		bip_for_each_vec(iv, bip, iter) {
			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;

			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(phys);

				virt++;
				phys++;
			}

			kunmap_atomic(pi);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}
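
For reference, here is a rough sketch of what the block-layer replacement might look like (the additions are in the other changed files; the name t10_pi_prepare() and its signature are assumptions, not shown in this hunk). The logic mirrors sd_dif_prepare() above, but it operates on a struct request, takes the protection type from the caller instead of reaching into scsi_disk, and reads the tuple size from the queue's integrity profile rather than hard-coding sizeof(struct t10_pi_tuple):

/*
 * Sketch only: name and signature are assumptions based on the
 * commit message, not on the hunk shown above.
 */
void t10_pi_prepare(struct request *rq, u8 protection_type)
{
	/* Tuple size comes from the integrity profile, since it may
	 * vary between integrity types. */
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	if (protection_type == T10_PI_TYPE3_PROTECTION)
		return;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		struct bio_vec iv;
		struct bvec_iter iter;
		u32 virt;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		virt = bip_get_seed(bip) & 0xffffffff;

		bip_for_each_vec(iv, bip, iter) {
			struct t10_pi_tuple *pi;
			unsigned int j;

			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;

			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
			}

			kunmap_atomic(pi);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}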
/*
* Remap physical sector values in the reference tag to the virtual
* values expected by the block layer.
*/
void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
{
	const int tuple_sz = sizeof(struct t10_pi_tuple);
	struct scsi_disk *sdkp;
	struct bio *bio;
	struct t10_pi_tuple *pi;
	unsigned int j, intervals;
	u32 phys, virt;

	sdkp = scsi_disk(scmd->request->rq_disk);

	if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION || good_bytes == 0)
		return;

	intervals = good_bytes / scsi_prot_interval(scmd);
	phys = t10_pi_ref_tag(scmd->request);

	__rq_for_each_bio(bio, scmd->request) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		struct bio_vec iv;
		struct bvec_iter iter;

		virt = bip_get_seed(bip) & 0xffffffff;

		bip_for_each_vec(iv, bip, iter) {
			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;

			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
				if (intervals == 0) {
					kunmap_atomic(pi);
					return;
				}

				if (be32_to_cpu(pi->ref_tag) == phys)
					pi->ref_tag = cpu_to_be32(virt);

				virt++;
				phys++;
				intervals--;
			}

			kunmap_atomic(pi);
		}
	}
}
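
And a matching sketch for the completion side, under the same caveats (name and signature are assumptions). The caller is expected to convert good_bytes into a count of protection intervals, e.g. good_bytes / scsi_prot_interval(scmd) in the SCSI case, since only the driver knows how many bytes actually completed:

/*
 * Sketch only: name and signature are assumptions based on the
 * commit message, not on the hunk shown above.
 */
void t10_pi_complete(struct request *rq, u8 protection_type,
		     unsigned int intervals)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	if (protection_type == T10_PI_TYPE3_PROTECTION)
		return;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		struct bio_vec iv;
		struct bvec_iter iter;
		u32 virt = bip_get_seed(bip) & 0xffffffff;

		bip_for_each_vec(iv, bip, iter) {
			struct t10_pi_tuple *pi;
			unsigned int j;

			pi = kmap_atomic(iv.bv_page) + iv.bv_offset;

			for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
				/* Stop once all completed intervals are
				 * remapped back to virtual values. */
				if (intervals == 0) {
					kunmap_atomic(pi);
					return;
				}

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);

				virt++;
				ref_tag++;
				intervals--;
			}

			kunmap_atomic(pi);
		}
	}
}

Passing an interval count rather than a byte count keeps the helper free of any driver-specific notion of sector or interval size, which is what makes it reusable from NVMe as well as SCSI.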