for-5.4/post-2019-09-24

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl2J8xQQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpujgD/94s9GGKN8JShxCpT0YNuWyyFF5gNlaimQU
 RSGAwnv2YUgEGNSUOPpcaj5FAYhTfYzbqoHlE+jytA2U5KXTOhc5Z85QV+TY4HPs
 I03xczYuYD/uX0QuF00zU2+6eV3lETELPiBARbfEQdHfm72iwurweHzlh4dfhbxW
 P7UA/cKixXWF2CH9wg5347Ll93nD24f2pi8BUyLJi/xpdlaRrN11Ii8AzNlRmq52
 VRxURuogl98W89F6EV2VhPGFgUEYHY2Ot7II2OqqV+jmjHDQW9y5hximzINOqkxs
 bQwo5J+WrDSPoqwl8+db2k7QQjAl1XKDAHmCwz+7J/BoOgZj8/M1FMBwzita+5x+
 UqxEYe7k+2G3w2zuhBrq03BypU8pwqFep/QI0cCCPaHs4J5QnkVOScEqd6iV/C3T
 FPvMvqDf7MrElghj4Qa2IZlh/CgqmLG5NUEz8E40cXkdiP+E+eK9ZY2Uwx2XhBrm
 7Gl+SpG5DxWqqJeRNVWjFwM4p5L+01NtwDbTjZ1rsf+mCW5cNsy/L9B4UpPz4HxW
 coAs0y/Ce+ZhCopIXZ4jLDBoTG9yoVg8EcyfaHKD2Zz0mUFxa2xm+LvXKeT49qqx
 xuodpKD3fiuM7h9Xgv+cDsmn8Rr8gSeXEGV7qzpudmkxbp6IVg/yG5hC/dM921GR
 EVrRtUIwdw==
 =aAPP
 -----END PGP SIGNATURE-----

Merge tag 'for-5.4/post-2019-09-24' of git://git.kernel.dk/linux-block

Pull more block updates from Jens Axboe:
 "Some later additions that weren't quite done for the first pull
  request, and also a few fixes that have arrived since.

  This contains:

   - Kill silly pktcdvd warning on attempting to register a non-scsi
     passthrough device (me)

   - Use symbolic constants for the block t10 protection types, and
     switch to handling it in core rather than in the drivers (Max)

   - libahci platform missing node put fix (Nishka)

   - Small series of fixes for BFQ (Paolo)

   - Fix possible nbd crash (Xiubo)"

* tag 'for-5.4/post-2019-09-24' of git://git.kernel.dk/linux-block:
  block: drop device references in bsg_queue_rq()
  block: t10-pi: fix -Wswitch warning
  pktcdvd: remove warning on attempting to register non-passthrough dev
  ata: libahci_platform: Add of_node_put() before loop exit
  nbd: fix possible page fault for nbd disk
  nbd: rename the runtime flags as NBD_RT_ prefixed
  block, bfq: push up injection only after setting service time
  block, bfq: increase update frequency of inject limit
  block, bfq: reduce upper bound for inject limit to max_rq_in_driver+1
  block, bfq: update inject limit only after injection occurred
  block: centralize PI remapping logic to the block layer
  block: use symbolic constants for t10_pi type
This commit is contained in:
Linus Torvalds 2019-09-24 16:31:50 -07:00
commit 2e959dd87a
14 changed files with 237 additions and 164 deletions

View File

@ -2016,7 +2016,7 @@ static void bfq_add_request(struct request *rq)
(bfqq->last_serv_time_ns > 0 && (bfqq->last_serv_time_ns > 0 &&
bfqd->rqs_injected && bfqd->rq_in_driver > 0)) && bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
time_is_before_eq_jiffies(bfqq->decrease_time_jif + time_is_before_eq_jiffies(bfqq->decrease_time_jif +
msecs_to_jiffies(100))) { msecs_to_jiffies(10))) {
bfqd->last_empty_occupied_ns = ktime_get_ns(); bfqd->last_empty_occupied_ns = ktime_get_ns();
/* /*
* Start the state machine for measuring the * Start the state machine for measuring the
@ -2025,6 +2025,20 @@ static void bfq_add_request(struct request *rq)
* be set when rq will be dispatched. * be set when rq will be dispatched.
*/ */
bfqd->wait_dispatch = true; bfqd->wait_dispatch = true;
/*
* If there is no I/O in service in the drive,
* then possible injection occurred before the
* arrival of rq will not affect the total
* service time of rq. So the injection limit
* must not be updated as a function of such
* total service time, unless new injection
* occurs before rq is completed. To have the
* injection limit updated only in the latter
* case, reset rqs_injected here (rqs_injected
* will be set in case injection is performed
* on bfqq before rq is completed).
*/
if (bfqd->rq_in_driver == 0)
bfqd->rqs_injected = false; bfqd->rqs_injected = false;
} }
} }
@ -5784,14 +5798,14 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns; u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
unsigned int old_limit = bfqq->inject_limit; unsigned int old_limit = bfqq->inject_limit;
if (bfqq->last_serv_time_ns > 0) { if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
u64 threshold = (bfqq->last_serv_time_ns * 3)>>1; u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
if (tot_time_ns >= threshold && old_limit > 0) { if (tot_time_ns >= threshold && old_limit > 0) {
bfqq->inject_limit--; bfqq->inject_limit--;
bfqq->decrease_time_jif = jiffies; bfqq->decrease_time_jif = jiffies;
} else if (tot_time_ns < threshold && } else if (tot_time_ns < threshold &&
old_limit < bfqd->max_rq_in_driver<<1) old_limit <= bfqd->max_rq_in_driver)
bfqq->inject_limit++; bfqq->inject_limit++;
} }
@ -5809,12 +5823,14 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
*/ */
if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) || if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
tot_time_ns < bfqq->last_serv_time_ns) { tot_time_ns < bfqq->last_serv_time_ns) {
bfqq->last_serv_time_ns = tot_time_ns; if (bfqq->last_serv_time_ns == 0) {
/* /*
* Now we certainly have a base value: make sure we * Now we certainly have a base value: make sure we
* start trying injection. * start trying injection.
*/ */
bfqq->inject_limit = max_t(unsigned int, 1, old_limit); bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
}
bfqq->last_serv_time_ns = tot_time_ns;
} else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1) } else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
/* /*
* No I/O injected and no request still in service in * No I/O injected and no request still in service in
@ -5830,6 +5846,7 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
/* update complete, not waiting for any request completion any longer */ /* update complete, not waiting for any request completion any longer */
bfqd->waited_rq = NULL; bfqd->waited_rq = NULL;
bfqd->rqs_injected = false;
} }
/* /*

View File

@ -34,6 +34,7 @@
#include <linux/ratelimit.h> #include <linux/ratelimit.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h> #include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/bpf.h> #include <linux/bpf.h>
#include <linux/psi.h> #include <linux/psi.h>
@ -1436,6 +1437,12 @@ bool blk_update_request(struct request *req, blk_status_t error,
if (!req->bio) if (!req->bio)
return false; return false;
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
error == BLK_STS_OK)
req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif
if (unlikely(error && !blk_rq_is_passthrough(req) && if (unlikely(error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET))) !(req->rq_flags & RQF_QUIET)))
print_req_error(req, error, __func__); print_req_error(req, error, __func__);

View File

@ -368,10 +368,21 @@ static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
return BLK_STS_OK; return BLK_STS_OK;
} }
static void blk_integrity_nop_prepare(struct request *rq)
{
}
static void blk_integrity_nop_complete(struct request *rq,
unsigned int nr_bytes)
{
}
static const struct blk_integrity_profile nop_profile = { static const struct blk_integrity_profile nop_profile = {
.name = "nop", .name = "nop",
.generate_fn = blk_integrity_nop_fn, .generate_fn = blk_integrity_nop_fn,
.verify_fn = blk_integrity_nop_fn, .verify_fn = blk_integrity_nop_fn,
.prepare_fn = blk_integrity_nop_prepare,
.complete_fn = blk_integrity_nop_complete,
}; };
/** /**

View File

@ -30,6 +30,7 @@
#include <trace/events/block.h> #include <trace/events/block.h>
#include <linux/blk-mq.h> #include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h" #include "blk.h"
#include "blk-mq.h" #include "blk-mq.h"
#include "blk-mq-debugfs.h" #include "blk-mq-debugfs.h"
@ -700,6 +701,11 @@ void blk_mq_start_request(struct request *rq)
*/ */
rq->nr_phys_segments++; rq->nr_phys_segments++;
} }
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
q->integrity.profile->prepare_fn(rq);
#endif
} }
EXPORT_SYMBOL(blk_mq_start_request); EXPORT_SYMBOL(blk_mq_start_request);

View File

@ -266,6 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq; struct request *req = bd->rq;
struct bsg_set *bset = struct bsg_set *bset =
container_of(q->tag_set, struct bsg_set, tag_set); container_of(q->tag_set, struct bsg_set, tag_set);
int sts = BLK_STS_IOERR;
int ret; int ret;
blk_mq_start_request(req); blk_mq_start_request(req);
@ -274,14 +275,15 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_IOERR; return BLK_STS_IOERR;
if (!bsg_prepare_job(dev, req)) if (!bsg_prepare_job(dev, req))
return BLK_STS_IOERR; goto out;
ret = bset->job_fn(blk_mq_rq_to_pdu(req)); ret = bset->job_fn(blk_mq_rq_to_pdu(req));
if (ret) if (!ret)
return BLK_STS_IOERR; sts = BLK_STS_OK;
out:
put_device(dev); put_device(dev);
return BLK_STS_OK; return sts;
} }
/* called right after the request is allocated for the request_queue */ /* called right after the request is allocated for the request_queue */

View File

@ -27,7 +27,7 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len)
* tag. * tag.
*/ */
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter, static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
csum_fn *fn, unsigned int type) csum_fn *fn, enum t10_dif_type type)
{ {
unsigned int i; unsigned int i;
@ -37,7 +37,7 @@ static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
pi->guard_tag = fn(iter->data_buf, iter->interval); pi->guard_tag = fn(iter->data_buf, iter->interval);
pi->app_tag = 0; pi->app_tag = 0;
if (type == 1) if (type == T10_PI_TYPE1_PROTECTION)
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed)); pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
else else
pi->ref_tag = 0; pi->ref_tag = 0;
@ -51,17 +51,18 @@ static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
} }
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
csum_fn *fn, unsigned int type) csum_fn *fn, enum t10_dif_type type)
{ {
unsigned int i; unsigned int i;
BUG_ON(type == T10_PI_TYPE0_PROTECTION);
for (i = 0 ; i < iter->data_size ; i += iter->interval) { for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf; struct t10_pi_tuple *pi = iter->prot_buf;
__be16 csum; __be16 csum;
switch (type) { if (type == T10_PI_TYPE1_PROTECTION ||
case 1: type == T10_PI_TYPE2_PROTECTION) {
case 2:
if (pi->app_tag == T10_PI_APP_ESCAPE) if (pi->app_tag == T10_PI_APP_ESCAPE)
goto next; goto next;
@ -73,12 +74,10 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
iter->seed, be32_to_cpu(pi->ref_tag)); iter->seed, be32_to_cpu(pi->ref_tag));
return BLK_STS_PROTECTION; return BLK_STS_PROTECTION;
} }
break; } else if (type == T10_PI_TYPE3_PROTECTION) {
case 3:
if (pi->app_tag == T10_PI_APP_ESCAPE && if (pi->app_tag == T10_PI_APP_ESCAPE &&
pi->ref_tag == T10_PI_REF_ESCAPE) pi->ref_tag == T10_PI_REF_ESCAPE)
goto next; goto next;
break;
} }
csum = fn(iter->data_buf, iter->interval); csum = fn(iter->data_buf, iter->interval);
@ -102,94 +101,40 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter) static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{ {
return t10_pi_generate(iter, t10_pi_crc_fn, 1); return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
} }
static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter) static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{ {
return t10_pi_generate(iter, t10_pi_ip_fn, 1); return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
} }
static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter) static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{ {
return t10_pi_verify(iter, t10_pi_crc_fn, 1); return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
} }
static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter) static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{ {
return t10_pi_verify(iter, t10_pi_ip_fn, 1); return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
} }
static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_crc_fn, 3);
}
static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_ip_fn, 3);
}
static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_crc_fn, 3);
}
static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_ip_fn, 3);
}
const struct blk_integrity_profile t10_pi_type1_crc = {
.name = "T10-DIF-TYPE1-CRC",
.generate_fn = t10_pi_type1_generate_crc,
.verify_fn = t10_pi_type1_verify_crc,
};
EXPORT_SYMBOL(t10_pi_type1_crc);
const struct blk_integrity_profile t10_pi_type1_ip = {
.name = "T10-DIF-TYPE1-IP",
.generate_fn = t10_pi_type1_generate_ip,
.verify_fn = t10_pi_type1_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type1_ip);
const struct blk_integrity_profile t10_pi_type3_crc = {
.name = "T10-DIF-TYPE3-CRC",
.generate_fn = t10_pi_type3_generate_crc,
.verify_fn = t10_pi_type3_verify_crc,
};
EXPORT_SYMBOL(t10_pi_type3_crc);
const struct blk_integrity_profile t10_pi_type3_ip = {
.name = "T10-DIF-TYPE3-IP",
.generate_fn = t10_pi_type3_generate_ip,
.verify_fn = t10_pi_type3_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
/** /**
* t10_pi_prepare - prepare PI prior submitting request to device * t10_pi_type1_prepare - prepare PI prior submitting request to device
* @rq: request with PI that should be prepared * @rq: request with PI that should be prepared
* @protection_type: PI type (Type 1/Type 2/Type 3)
* *
* For Type 1/Type 2, the virtual start sector is the one that was * For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to * originally submitted by the block layer for the ref_tag usage. Due to
* partitioning, MD/DM cloning, etc. the actual physical start sector is * partitioning, MD/DM cloning, etc. the actual physical start sector is
* likely to be different. Remap protection information to match the * likely to be different. Remap protection information to match the
* physical LBA. * physical LBA.
*
* Type 3 does not have a reference tag so no remapping is required.
*/ */
void t10_pi_prepare(struct request *rq, u8 protection_type) static void t10_pi_type1_prepare(struct request *rq)
{ {
const int tuple_sz = rq->q->integrity.tuple_size; const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq); u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio; struct bio *bio;
if (protection_type == T10_PI_TYPE3_PROTECTION)
return;
__rq_for_each_bio(bio, rq) { __rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff; u32 virt = bip_get_seed(bip) & 0xffffffff;
@ -222,13 +167,11 @@ void t10_pi_prepare(struct request *rq, u8 protection_type)
bip->bip_flags |= BIP_MAPPED_INTEGRITY; bip->bip_flags |= BIP_MAPPED_INTEGRITY;
} }
} }
EXPORT_SYMBOL(t10_pi_prepare);
/** /**
* t10_pi_complete - prepare PI prior returning request to the block layer * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
* @rq: request with PI that should be prepared * @rq: request with PI that should be prepared
* @protection_type: PI type (Type 1/Type 2/Type 3) * @nr_bytes: total bytes to prepare
* @intervals: total elements to prepare
* *
* For Type 1/Type 2, the virtual start sector is the one that was * For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to * originally submitted by the block layer for the ref_tag usage. Due to
@ -236,19 +179,14 @@ EXPORT_SYMBOL(t10_pi_prepare);
* likely to be different. Since the physical start sector was submitted * likely to be different. Since the physical start sector was submitted
* to the device, we should remap it back to virtual values expected by the * to the device, we should remap it back to virtual values expected by the
* block layer. * block layer.
*
* Type 3 does not have a reference tag so no remapping is required.
*/ */
void t10_pi_complete(struct request *rq, u8 protection_type, static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
unsigned int intervals)
{ {
unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
const int tuple_sz = rq->q->integrity.tuple_size; const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq); u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio; struct bio *bio;
if (protection_type == T10_PI_TYPE3_PROTECTION)
return;
__rq_for_each_bio(bio, rq) { __rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff; u32 virt = bip_get_seed(bip) & 0xffffffff;
@ -276,4 +214,73 @@ void t10_pi_complete(struct request *rq, u8 protection_type,
} }
} }
} }
EXPORT_SYMBOL(t10_pi_complete);
static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}
static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}
static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
/**
* Type 3 does not have a reference tag so no remapping is required.
*/
static void t10_pi_type3_prepare(struct request *rq)
{
}
/**
* Type 3 does not have a reference tag so no remapping is required.
*/
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}
const struct blk_integrity_profile t10_pi_type1_crc = {
.name = "T10-DIF-TYPE1-CRC",
.generate_fn = t10_pi_type1_generate_crc,
.verify_fn = t10_pi_type1_verify_crc,
.prepare_fn = t10_pi_type1_prepare,
.complete_fn = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);
const struct blk_integrity_profile t10_pi_type1_ip = {
.name = "T10-DIF-TYPE1-IP",
.generate_fn = t10_pi_type1_generate_ip,
.verify_fn = t10_pi_type1_verify_ip,
.prepare_fn = t10_pi_type1_prepare,
.complete_fn = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);
const struct blk_integrity_profile t10_pi_type3_crc = {
.name = "T10-DIF-TYPE3-CRC",
.generate_fn = t10_pi_type3_generate_crc,
.verify_fn = t10_pi_type3_verify_crc,
.prepare_fn = t10_pi_type3_prepare,
.complete_fn = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);
const struct blk_integrity_profile t10_pi_type3_ip = {
.name = "T10-DIF-TYPE3-IP",
.generate_fn = t10_pi_type3_generate_ip,
.verify_fn = t10_pi_type3_verify_ip,
.prepare_fn = t10_pi_type3_prepare,
.complete_fn = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);

View File

@ -497,6 +497,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (of_property_read_u32(child, "reg", &port)) { if (of_property_read_u32(child, "reg", &port)) {
rc = -EINVAL; rc = -EINVAL;
of_node_put(child);
goto err_out; goto err_out;
} }
@ -514,14 +515,18 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (port_dev) { if (port_dev) {
rc = ahci_platform_get_regulator(hpriv, port, rc = ahci_platform_get_regulator(hpriv, port,
&port_dev->dev); &port_dev->dev);
if (rc == -EPROBE_DEFER) if (rc == -EPROBE_DEFER) {
of_node_put(child);
goto err_out; goto err_out;
} }
}
#endif #endif
rc = ahci_platform_get_phy(hpriv, port, dev, child); rc = ahci_platform_get_phy(hpriv, port, dev, child);
if (rc) if (rc) {
of_node_put(child);
goto err_out; goto err_out;
}
enabled_ports++; enabled_ports++;
} }

View File

@ -26,6 +26,7 @@
#include <linux/ioctl.h> #include <linux/ioctl.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/slab.h> #include <linux/slab.h>
@ -71,14 +72,17 @@ struct link_dead_args {
int index; int index;
}; };
#define NBD_TIMEDOUT 0 #define NBD_RT_TIMEDOUT 0
#define NBD_RT_DISCONNECT_REQUESTED 1
#define NBD_RT_DISCONNECTED 2
#define NBD_RT_HAS_PID_FILE 3
#define NBD_RT_HAS_CONFIG_REF 4
#define NBD_RT_BOUND 5
#define NBD_RT_DESTROY_ON_DISCONNECT 6
#define NBD_RT_DISCONNECT_ON_CLOSE 7
#define NBD_DESTROY_ON_DISCONNECT 0
#define NBD_DISCONNECT_REQUESTED 1 #define NBD_DISCONNECT_REQUESTED 1
#define NBD_DISCONNECTED 2
#define NBD_HAS_PID_FILE 3
#define NBD_HAS_CONFIG_REF 4
#define NBD_BOUND 5
#define NBD_DESTROY_ON_DISCONNECT 6
#define NBD_DISCONNECT_ON_CLOSE 7
struct nbd_config { struct nbd_config {
u32 flags; u32 flags;
@ -113,6 +117,9 @@ struct nbd_device {
struct list_head list; struct list_head list;
struct task_struct *task_recv; struct task_struct *task_recv;
struct task_struct *task_setup; struct task_struct *task_setup;
struct completion *destroy_complete;
unsigned long flags;
}; };
#define NBD_CMD_REQUEUED 1 #define NBD_CMD_REQUEUED 1
@ -223,6 +230,16 @@ static void nbd_dev_remove(struct nbd_device *nbd)
disk->private_data = NULL; disk->private_data = NULL;
put_disk(disk); put_disk(disk);
} }
/*
* Place this in the last just before the nbd is freed to
* make sure that the disk and the related kobject are also
* totally removed to avoid duplicate creation of the same
* one.
*/
if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
complete(nbd->destroy_complete);
kfree(nbd); kfree(nbd);
} }
@ -238,8 +255,8 @@ static void nbd_put(struct nbd_device *nbd)
static int nbd_disconnected(struct nbd_config *config) static int nbd_disconnected(struct nbd_config *config)
{ {
return test_bit(NBD_DISCONNECTED, &config->runtime_flags) || return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
} }
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
@ -257,9 +274,9 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
if (!nsock->dead) { if (!nsock->dead) {
kernel_sock_shutdown(nsock->sock, SHUT_RDWR); kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
if (atomic_dec_return(&nbd->config->live_connections) == 0) { if (atomic_dec_return(&nbd->config->live_connections) == 0) {
if (test_and_clear_bit(NBD_DISCONNECT_REQUESTED, if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
&nbd->config->runtime_flags)) { &nbd->config->runtime_flags)) {
set_bit(NBD_DISCONNECTED, set_bit(NBD_RT_DISCONNECTED,
&nbd->config->runtime_flags); &nbd->config->runtime_flags);
dev_info(nbd_to_dev(nbd), dev_info(nbd_to_dev(nbd),
"Disconnected due to user request.\n"); "Disconnected due to user request.\n");
@ -333,7 +350,7 @@ static void sock_shutdown(struct nbd_device *nbd)
if (config->num_connections == 0) if (config->num_connections == 0)
return; return;
if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags)) if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
return; return;
for (i = 0; i < config->num_connections; i++) { for (i = 0; i < config->num_connections; i++) {
@ -427,7 +444,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
} }
dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
set_bit(NBD_TIMEDOUT, &config->runtime_flags); set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR; cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock); mutex_unlock(&cmd->lock);
sock_shutdown(nbd); sock_shutdown(nbd);
@ -795,7 +812,7 @@ static int find_fallback(struct nbd_device *nbd, int index)
struct nbd_sock *nsock = config->socks[index]; struct nbd_sock *nsock = config->socks[index];
int fallback = nsock->fallback_index; int fallback = nsock->fallback_index;
if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
return new_index; return new_index;
if (config->num_connections <= 1) { if (config->num_connections <= 1) {
@ -836,7 +853,7 @@ static int wait_for_reconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config; struct nbd_config *config = nbd->config;
if (!config->dead_conn_timeout) if (!config->dead_conn_timeout)
return 0; return 0;
if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
return 0; return 0;
return wait_event_timeout(config->conn_wait, return wait_event_timeout(config->conn_wait,
atomic_read(&config->live_connections) > 0, atomic_read(&config->live_connections) > 0,
@ -969,12 +986,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
return err; return err;
if (!netlink && !nbd->task_setup && if (!netlink && !nbd->task_setup &&
!test_bit(NBD_BOUND, &config->runtime_flags)) !test_bit(NBD_RT_BOUND, &config->runtime_flags))
nbd->task_setup = current; nbd->task_setup = current;
if (!netlink && if (!netlink &&
(nbd->task_setup != current || (nbd->task_setup != current ||
test_bit(NBD_BOUND, &config->runtime_flags))) { test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
dev_err(disk_to_dev(nbd->disk), dev_err(disk_to_dev(nbd->disk),
"Device being setup by another task"); "Device being setup by another task");
sockfd_put(sock); sockfd_put(sock);
@ -1053,7 +1070,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
mutex_unlock(&nsock->tx_lock); mutex_unlock(&nsock->tx_lock);
sockfd_put(old); sockfd_put(old);
clear_bit(NBD_DISCONNECTED, &config->runtime_flags); clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
/* We take the tx_mutex in an error path in the recv_work, so we /* We take the tx_mutex in an error path in the recv_work, so we
* need to queue_work outside of the tx_mutex. * need to queue_work outside of the tx_mutex.
@ -1124,7 +1141,8 @@ static int nbd_disconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config; struct nbd_config *config = nbd->config;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
send_disconnects(nbd); send_disconnects(nbd);
return 0; return 0;
} }
@ -1143,7 +1161,7 @@ static void nbd_config_put(struct nbd_device *nbd)
struct nbd_config *config = nbd->config; struct nbd_config *config = nbd->config;
nbd_dev_dbg_close(nbd); nbd_dev_dbg_close(nbd);
nbd_size_clear(nbd); nbd_size_clear(nbd);
if (test_and_clear_bit(NBD_HAS_PID_FILE, if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
&config->runtime_flags)) &config->runtime_flags))
device_remove_file(disk_to_dev(nbd->disk), &pid_attr); device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
nbd->task_recv = NULL; nbd->task_recv = NULL;
@ -1209,7 +1227,7 @@ static int nbd_start_device(struct nbd_device *nbd)
dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
return error; return error;
} }
set_bit(NBD_HAS_PID_FILE, &config->runtime_flags); set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
nbd_dev_dbg_init(nbd); nbd_dev_dbg_init(nbd);
for (i = 0; i < num_connections; i++) { for (i = 0; i < num_connections; i++) {
@ -1256,9 +1274,9 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
mutex_lock(&nbd->config_lock); mutex_lock(&nbd->config_lock);
nbd_bdev_reset(bdev); nbd_bdev_reset(bdev);
/* user requested, ignore socket errors */ /* user requested, ignore socket errors */
if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags)) if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
ret = 0; ret = 0;
if (test_bit(NBD_TIMEDOUT, &config->runtime_flags)) if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
return ret; return ret;
} }
@ -1269,7 +1287,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
sock_shutdown(nbd); sock_shutdown(nbd);
__invalidate_device(bdev, true); __invalidate_device(bdev, true);
nbd_bdev_reset(bdev); nbd_bdev_reset(bdev);
if (test_and_clear_bit(NBD_HAS_CONFIG_REF, if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags)) &nbd->config->runtime_flags))
nbd_config_put(nbd); nbd_config_put(nbd);
} }
@ -1364,7 +1382,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
/* Don't allow ioctl operations on a nbd device that was created with /* Don't allow ioctl operations on a nbd device that was created with
* netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine. * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
*/ */
if (!test_bit(NBD_BOUND, &config->runtime_flags) || if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
(cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK)) (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
error = __nbd_ioctl(bdev, nbd, cmd, arg); error = __nbd_ioctl(bdev, nbd, cmd, arg);
else else
@ -1435,7 +1453,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
struct nbd_device *nbd = disk->private_data; struct nbd_device *nbd = disk->private_data;
struct block_device *bdev = bdget_disk(disk, 0); struct block_device *bdev = bdget_disk(disk, 0);
if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
bdev->bd_openers == 0) bdev->bd_openers == 0)
nbd_disconnect_and_put(nbd); nbd_disconnect_and_put(nbd);
@ -1636,6 +1654,7 @@ static int nbd_dev_add(int index)
nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_BLOCKING; BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd; nbd->tag_set.driver_data = nbd;
nbd->destroy_complete = NULL;
err = blk_mq_alloc_tag_set(&nbd->tag_set); err = blk_mq_alloc_tag_set(&nbd->tag_set);
if (err) if (err)
@ -1750,6 +1769,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{ {
DECLARE_COMPLETION_ONSTACK(destroy_complete);
struct nbd_device *nbd = NULL; struct nbd_device *nbd = NULL;
struct nbd_config *config; struct nbd_config *config;
int index = -1; int index = -1;
@ -1801,6 +1821,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
mutex_unlock(&nbd_index_mutex); mutex_unlock(&nbd_index_mutex);
return -EINVAL; return -EINVAL;
} }
if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
nbd->destroy_complete = &destroy_complete;
mutex_unlock(&nbd_index_mutex);
/* Wait until the nbd stuff is totally destroyed */
wait_for_completion(&destroy_complete);
goto again;
}
if (!refcount_inc_not_zero(&nbd->refs)) { if (!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex); mutex_unlock(&nbd_index_mutex);
if (index == -1) if (index == -1)
@ -1833,7 +1864,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM; return -ENOMEM;
} }
refcount_set(&nbd->config_refs, 1); refcount_set(&nbd->config_refs, 1);
set_bit(NBD_BOUND, &config->runtime_flags); set_bit(NBD_RT_BOUND, &config->runtime_flags);
ret = nbd_genl_size_set(info, nbd); ret = nbd_genl_size_set(info, nbd);
if (ret) if (ret)
@ -1853,12 +1884,15 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
set_bit(NBD_DESTROY_ON_DISCONNECT, set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
&config->runtime_flags); &config->runtime_flags);
set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
put_dev = true; put_dev = true;
} else {
clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
} }
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
set_bit(NBD_DISCONNECT_ON_CLOSE, set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
&config->runtime_flags); &config->runtime_flags);
} }
} }
@ -1897,7 +1931,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
out: out:
mutex_unlock(&nbd->config_lock); mutex_unlock(&nbd->config_lock);
if (!ret) { if (!ret) {
set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags); set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
refcount_inc(&nbd->config_refs); refcount_inc(&nbd->config_refs);
nbd_connect_reply(info, nbd->index); nbd_connect_reply(info, nbd->index);
} }
@ -1919,7 +1953,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
* queue. * queue.
*/ */
flush_workqueue(nbd->recv_workq); flush_workqueue(nbd->recv_workq);
if (test_and_clear_bit(NBD_HAS_CONFIG_REF, if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags)) &nbd->config->runtime_flags))
nbd_config_put(nbd); nbd_config_put(nbd);
} }
@ -2003,7 +2037,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&nbd->config_lock); mutex_lock(&nbd->config_lock);
config = nbd->config; config = nbd->config;
if (!test_bit(NBD_BOUND, &config->runtime_flags) || if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
!nbd->task_recv) { !nbd->task_recv) {
dev_err(nbd_to_dev(nbd), dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n"); "not configured, cannot reconfigure\n");
@ -2026,20 +2060,22 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
&config->runtime_flags)) &config->runtime_flags))
put_dev = true; put_dev = true;
set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
} else { } else {
if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
&config->runtime_flags)) &config->runtime_flags))
refcount_inc(&nbd->refs); refcount_inc(&nbd->refs);
clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
} }
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
set_bit(NBD_DISCONNECT_ON_CLOSE, set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
&config->runtime_flags); &config->runtime_flags);
} else { } else {
clear_bit(NBD_DISCONNECT_ON_CLOSE, clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
&config->runtime_flags); &config->runtime_flags);
} }
} }

View File

@ -2594,7 +2594,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (ret) if (ret)
return ret; return ret;
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) { if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL; return -EINVAL;
} }

View File

@ -345,6 +345,14 @@ static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0) #define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
#endif #endif
static void dm_integrity_prepare(struct request *rq)
{
}
static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}
/* /*
* DM Integrity profile, protection is performed layer above (dm-crypt) * DM Integrity profile, protection is performed layer above (dm-crypt)
*/ */
@ -352,6 +360,8 @@ static const struct blk_integrity_profile dm_integrity_profile = {
.name = "DM-DIF-EXT-TAG", .name = "DM-DIF-EXT-TAG",
.generate_fn = NULL, .generate_fn = NULL,
.verify_fn = NULL, .verify_fn = NULL,
.prepare_fn = dm_integrity_prepare,
.complete_fn = dm_integrity_complete,
}; };
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map); static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);

View File

@ -666,8 +666,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
return BLK_STS_NOTSUPP; return BLK_STS_NOTSUPP;
control |= NVME_RW_PRINFO_PRACT; control |= NVME_RW_PRINFO_PRACT;
} else if (req_op(req) == REQ_OP_WRITE) {
t10_pi_prepare(req, ns->pi_type);
} }
switch (ns->pi_type) { switch (ns->pi_type) {
@ -690,13 +688,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
void nvme_cleanup_cmd(struct request *req) void nvme_cleanup_cmd(struct request *req)
{ {
if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
nvme_req(req)->status == 0) {
struct nvme_ns *ns = req->rq_disk->private_data;
t10_pi_complete(req, ns->pi_type,
blk_rq_bytes(req) >> ns->lba_shift);
}
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
struct nvme_ns *ns = req->rq_disk->private_data; struct nvme_ns *ns = req->rq_disk->private_data;
struct page *page = req->special_vec.bv_page; struct page *page = req->special_vec.bv_page;

View File

@ -1211,9 +1211,6 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
dix = scsi_prot_sg_count(cmd); dix = scsi_prot_sg_count(cmd);
dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type); dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
if (write && dix)
t10_pi_prepare(cmd->request, sdkp->protection_type);
if (dif || dix) if (dif || dix)
protect = sd_setup_protect_cmnd(cmd, dix, dif); protect = sd_setup_protect_cmnd(cmd, dix, dif);
else else
@ -2055,11 +2052,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
"sd_done: completed %d of %d bytes\n", "sd_done: completed %d of %d bytes\n",
good_bytes, scsi_bufflen(SCpnt))); good_bytes, scsi_bufflen(SCpnt)));
if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) &&
good_bytes)
t10_pi_complete(SCpnt->request, sdkp->protection_type,
good_bytes / scsi_prot_interval(SCpnt));
return good_bytes; return good_bytes;
} }

View File

@ -1524,10 +1524,14 @@ struct blk_integrity_iter {
}; };
typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
typedef void (integrity_prepare_fn) (struct request *);
typedef void (integrity_complete_fn) (struct request *, unsigned int);
struct blk_integrity_profile { struct blk_integrity_profile {
integrity_processing_fn *generate_fn; integrity_processing_fn *generate_fn;
integrity_processing_fn *verify_fn; integrity_processing_fn *verify_fn;
integrity_prepare_fn *prepare_fn;
integrity_complete_fn *complete_fn;
const char *name; const char *name;
}; };

View File

@ -53,18 +53,4 @@ extern const struct blk_integrity_profile t10_pi_type1_ip;
extern const struct blk_integrity_profile t10_pi_type3_crc; extern const struct blk_integrity_profile t10_pi_type3_crc;
extern const struct blk_integrity_profile t10_pi_type3_ip; extern const struct blk_integrity_profile t10_pi_type3_ip;
#ifdef CONFIG_BLK_DEV_INTEGRITY
extern void t10_pi_prepare(struct request *rq, u8 protection_type);
extern void t10_pi_complete(struct request *rq, u8 protection_type,
unsigned int intervals);
#else
static inline void t10_pi_complete(struct request *rq, u8 protection_type,
unsigned int intervals)
{
}
static inline void t10_pi_prepare(struct request *rq, u8 protection_type)
{
}
#endif
#endif #endif