nvme fixes for Linux 5.19

 - more quirks (Lamarque Vieira Souza, Pablo Greco)
 - fix a fabrics disconnect regression (Ruozhu Li)
 - fix a nvmet-tcp data_digest calculation regression (Sagi Grimberg)
 - fix nvme-tcp send failure handling (Sagi Grimberg)
 - fix a regression with nvmet-loop and passthrough controllers (Alan Adamson)
-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmK9/P0LHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYNX8hAAt40pq6crg4rC1gpotiUCD3iajxOWkdZHOIIoxo+q
gZHgSZoaWa5YawNHaJH1gZtPb2iWsh5V1wmbOpjGxX+J+xi3soUE2+w+tHK4S7q7
ron3aCTQS1icFpTpWeHlzgSe+xmlfY9OXzQw7Uwj1hDCRpUEpYjqjsvzwlduOkIb
lJqP1iE2ye7kNXkdS6zRiq0mp8SuU/e6hO33Z8CXR5uqnU7YAtrsge5WV0YaWYJr
UMt9W57zecWaJHFPUC1qB57ERZMBMzLuD4CE5TT7oIuepfu8Kb1jmCT1q62XE0K4
JesC/97+pYXf+aHa5ThOZYsIQxCp9sqbUs7u37yFcNNq9xFZSbnGLEgpl0F8BOsX
fFE0ZjTu0K7JjVMS6db33TDSlBwlCgDlLy3d3mcIUzen/GWyMZ6o7ElSXIHcifNf
N3z4exI+MNEBXX/5QsIImskAnccBaa/lttL4GjJmgfAG5f2rKkNjq/uSsAgCZaFu
E+Njp8EYh8U3v3oejb4gAmSrveXa5F4P+opgM54prA/6pt5R1viIuJaIcgomnVRc
GZnFu/jogWcXQaHQeguQ84GBWK3Q3tz5ejdINYy5a8xrKdkfElc9LrgB6+TfZYnf
spos1cA8N7Icndcu9CviJy8kezJmcOaAgCYDeWNVCBZrc/pnxQ0Qh0wAYGr8WuTw
VBA=
=0qoc
-----END PGP SIGNATURE-----

Merge tag 'nvme-5.19-2022-06-30' of git://git.infradead.org/nvme into block-5.19

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 5.19

 - more quirks (Lamarque Vieira Souza, Pablo Greco)
 - fix a fabrics disconnect regression (Ruozhu Li)
 - fix a nvmet-tcp data_digest calculation regression (Sagi Grimberg)
 - fix nvme-tcp send failure handling (Sagi Grimberg)
 - fix a regression with nvmet-loop and passthrough controllers (Alan Adamson)"

* tag 'nvme-5.19-2022-06-30' of git://git.infradead.org/nvme:
  nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA IM2P33F8ABR1
  nvmet: add a clear_ids attribute for passthru targets
  nvme: fix regression when disconnect a recovering ctrl
  nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA XPG SX6000LNP (AKA SPECTRIX S40G)
  nvme-tcp: always fail a request when sending it failed
  nvmet-tcp: fix regression in data_digest calculation
commit f3163d8567
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4595,6 +4595,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 	nvme_stop_failfast_work(ctrl);
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fw_act_work);
+	if (ctrl->ops->stop_ctrl)
+		ctrl->ops->stop_ctrl(ctrl);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
 
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -502,6 +502,7 @@ struct nvme_ctrl_ops {
 	void (*free_ctrl)(struct nvme_ctrl *ctrl);
 	void (*submit_async_event)(struct nvme_ctrl *ctrl);
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
+	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 	void (*print_device_info)(struct nvme_ctrl *ctrl);
 };
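The two hunks above add the new op and its guarded call site: nvme_stop_ctrl() only dispatches to stop_ctrl when the transport provides one, so transports that do not need it (e.g. PCIe) leave the pointer NULL. A minimal userspace sketch of that optional-callback pattern; all types and names below are illustrative stand-ins, not kernel code:

#include <stdio.h>

struct ctrl;

struct ctrl_ops {
	void (*stop_ctrl)(struct ctrl *c);	/* optional, may be NULL */
};

struct ctrl {
	const struct ctrl_ops *ops;
	const char *name;
};

/* Mirrors the guarded dispatch added to nvme_stop_ctrl(). */
static void stop_ctrl(struct ctrl *c)
{
	if (c->ops->stop_ctrl)
		c->ops->stop_ctrl(c);
}

static void tcp_stop_ctrl(struct ctrl *c)
{
	printf("%s: cancel err_work and connect_work\n", c->name);
}

static const struct ctrl_ops tcp_ops = { .stop_ctrl = tcp_stop_ctrl };
static const struct ctrl_ops pci_ops = { .stop_ctrl = NULL };

int main(void)
{
	struct ctrl tcp = { &tcp_ops, "tcp" };
	struct ctrl pci = { &pci_ops, "pci" };

	stop_ctrl(&tcp);	/* dispatches */
	stop_ctrl(&pci);	/* safely skipped */
	return 0;
}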
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3469,8 +3469,11 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x1b4b, 0x1092),	/* Lexar 256 GB SSD */
 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	{ PCI_DEVICE(0x1cc1, 0x33f8),	/* ADATA IM2P33F8ABR1 1 TB */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x10ec, 0x5762),	/* ADATA SX6000LNP */
-		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+				NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1cc1, 0x8201),	/* ADATA SX8200PNP 512GB */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
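Both ADATA entries pick up NVME_QUIRK_BOGUS_NID, which tells the core to distrust the namespace IDs these drives report. For context, a hedged sketch of how a driver_data-based quirk table is consumed at probe time; the table, lookup and bit values below are a standalone toy (the real flags live in drivers/nvme/host/nvme.h):

#include <stdint.h>
#include <stdio.h>

#define QUIRK_IGNORE_DEV_SUBNQN	(1 << 0)	/* illustrative bit values */
#define QUIRK_BOGUS_NID		(1 << 1)

struct pci_id {
	uint16_t vendor, device;
	unsigned long driver_data;
};

static const struct pci_id id_table[] = {
	{ 0x1cc1, 0x33f8, QUIRK_BOGUS_NID },		/* ADATA IM2P33F8ABR1 */
	{ 0x10ec, 0x5762, QUIRK_IGNORE_DEV_SUBNQN |
			  QUIRK_BOGUS_NID },		/* ADATA SX6000LNP */
	{ 0, 0, 0 },					/* terminator */
};

/* Probe-time lookup: the matched entry's driver_data becomes the quirks. */
static unsigned long quirks_for(uint16_t vendor, uint16_t device)
{
	for (const struct pci_id *id = id_table; id->vendor; id++)
		if (id->vendor == vendor && id->device == device)
			return id->driver_data;
	return 0;
}

int main(void)
{
	unsigned long quirks = quirks_for(0x10ec, 0x5762);

	if (quirks & QUIRK_BOGUS_NID)
		printf("ignoring device-reported namespace IDs\n");
	return 0;
}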
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1048,6 +1048,14 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 	}
 }
 
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+	cancel_work_sync(&ctrl->err_work);
+	cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -2252,9 +2260,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&ctrl->err_work);
-	cancel_delayed_work_sync(&ctrl->reconnect_work);
-
 	nvme_rdma_teardown_io_queues(ctrl, shutdown);
 	nvme_stop_admin_queue(&ctrl->ctrl);
 	if (shutdown)
@@ -2304,6 +2309,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.submit_async_event	= nvme_rdma_submit_async_event,
 	.delete_ctrl		= nvme_rdma_delete_ctrl,
 	.get_address		= nvmf_get_address,
+	.stop_ctrl		= nvme_rdma_stop_ctrl,
 };
 
 /*
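Net effect of the rdma.c hunks: err_work and reconnect_work are no longer cancelled inside nvme_rdma_shutdown_ctrl() but in the new .stop_ctrl, which nvme_stop_ctrl() runs at the start of teardown. My reading (an assumption; the pull message only calls it a disconnect regression fix) is that this closes the window in which recovery work could still run while a disconnect was already in flight. A toy ordering sketch with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct toy_ctrl {
	bool recovery_queued;	/* stands in for err_work/reconnect_work */
};

static void maybe_run_recovery(struct toy_ctrl *c, const char *phase)
{
	if (c->recovery_queued)
		printf("recovery work may still run during %s\n", phase);
}

static void delete_ctrl(struct toy_ctrl *c, bool cancel_in_stop_ctrl)
{
	/* nvme_stop_ctrl(): keep-alive, failfast, async event work ... */
	if (cancel_in_stop_ctrl)
		c->recovery_queued = false;	/* new: .stop_ctrl cancels here */
	maybe_run_recovery(c, "the intermediate stop steps");

	/* nvme_rdma_shutdown_ctrl(): */
	c->recovery_queued = false;		/* old: cancelled only here */
	printf("queues torn down\n");
}

int main(void)
{
	struct toy_ctrl old_flow = { true };
	struct toy_ctrl new_flow = { true };

	delete_ctrl(&old_flow, false);	/* window where recovery still runs */
	delete_ctrl(&new_flow, true);	/* window closed up front */
	return 0;
}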
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1180,8 +1180,7 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 	} else if (ret < 0) {
 		dev_err(queue->ctrl->ctrl.device,
 			"failed to send request %d\n", ret);
-		if (ret != -EPIPE && ret != -ECONNRESET)
-			nvme_tcp_fail_request(queue->request);
+		nvme_tcp_fail_request(queue->request);
 		nvme_tcp_done_send_req(queue);
 	}
 	return ret;
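The hunk removes the -EPIPE/-ECONNRESET carve-out: those connection errors previously left the request uncompleted (presumably relying on error recovery to finish it), while now every send failure fails the request on the spot before nvme_tcp_done_send_req(). A toy sketch of the behavioral change; the fake send handler below is illustrative:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static void fail_request(int err)
{
	printf("request completed as failed (err %d)\n", err);
}

/* ret is a negative errno from the socket send, as in nvme_tcp_try_send(). */
static void handle_send_error(int ret, bool old_behavior)
{
	if (old_behavior) {
		/* old: connection errors were deferred to error recovery */
		if (ret != -EPIPE && ret != -ECONNRESET)
			fail_request(ret);
	} else {
		/* new: always fail the request when sending it failed */
		fail_request(ret);
	}
}

int main(void)
{
	handle_send_error(-EPIPE, true);	/* silently deferred before */
	handle_send_error(-EPIPE, false);	/* now completed as failed */
	return 0;
}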
@@ -2194,9 +2193,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
-	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
-
 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
 	nvme_stop_admin_queue(ctrl);
 	if (shutdown)
@@ -2236,6 +2232,12 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
 	nvme_tcp_reconnect_or_remove(ctrl);
 }
 
+static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+}
+
 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
@@ -2557,6 +2559,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
 	.submit_async_event	= nvme_tcp_submit_async_event,
 	.delete_ctrl		= nvme_tcp_delete_ctrl,
 	.get_address		= nvmf_get_address,
+	.stop_ctrl		= nvme_tcp_stop_ctrl,
 };
 
 static bool
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
 }
 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
 
+static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
+}
+
+static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+	unsigned int clear_ids;
+
+	if (kstrtouint(page, 0, &clear_ids))
+		return -EINVAL;
+	subsys->clear_ids = clear_ids;
+	return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
+
 static struct configfs_attribute *nvmet_passthru_attrs[] = {
 	&nvmet_passthru_attr_device_path,
 	&nvmet_passthru_attr_enable,
 	&nvmet_passthru_attr_admin_timeout,
 	&nvmet_passthru_attr_io_timeout,
+	&nvmet_passthru_attr_clear_ids,
 	NULL,
 };
 
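With this, clear_ids appears alongside the other passthru attributes in the subsystem's configfs directory. A hedged userspace sketch of enabling it; the mount point is the standard configfs location, but the subsystem NQN "testnqn" is an example, not from the diff. As the target-core hunk below shows, loop ports get clear_ids = 1 by default, so writing it explicitly mainly matters for other transports:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Example path; /sys/kernel/config is where configfs is mounted. */
	const char *attr =
		"/sys/kernel/config/nvmet/subsystems/testnqn/passthru/clear_ids";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The store handler parses the string with kstrtouint(). */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}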
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1374,6 +1374,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	ctrl->port = req->port;
 	ctrl->ops = req->ops;
 
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+	/* By default, set loop targets to clear IDS by default */
+	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
+		subsys->clear_ids = 1;
+#endif
+
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -249,6 +249,7 @@ struct nvmet_subsys {
 	struct config_group	passthru_group;
 	unsigned int		admin_timeout;
 	unsigned int		io_timeout;
+	unsigned int		clear_ids;
 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
 
 #ifdef CONFIG_BLK_DEV_ZONED
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,53 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
 	ctrl->cap &= ~(1ULL << 43);
 }
 
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	u16 status = NVME_SC_SUCCESS;
+	int pos, len;
+	bool csi_seen = false;
+	void *data;
+	u8 csi;
+
+	if (!ctrl->subsys->clear_ids)
+		return status;
+
+	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+	if (!data)
+		return NVME_SC_INTERNAL;
+
+	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+	if (status)
+		goto out_free;
+
+	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+		struct nvme_ns_id_desc *cur = data + pos;
+
+		if (cur->nidl == 0)
+			break;
+		if (cur->nidt == NVME_NIDT_CSI) {
+			memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
+			csi_seen = true;
+			break;
+		}
+		len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
+	}
+
+	memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
+	if (csi_seen) {
+		struct nvme_ns_id_desc *cur = data;
+
+		cur->nidt = NVME_NIDT_CSI;
+		cur->nidl = NVME_NIDT_CSI_LEN;
+		memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
+	}
+	status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+out_free:
+	kfree(data);
+	return status;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
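For reference, the new nvmet_passthru_override_id_descs() walks an Identify Namespace Identification Descriptor list (CNS 03h): a 4 KB buffer of descriptors, each a 4-byte nidt/nidl header plus payload, terminated by a zero nidl; it keeps only the CSI descriptor and zeroes everything else. A standalone sketch of that walk; the struct layout and constants match the NVMe spec, but the sample buffer is fabricated for the demo:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NVME_IDENTIFY_DATA_SIZE	4096
#define NVME_NIDT_CSI		0x4
#define NVME_NIDT_CSI_LEN	1

struct nvme_ns_id_desc {
	uint8_t nidt;		/* descriptor type */
	uint8_t nidl;		/* descriptor length */
	uint8_t rsvd[2];
	/* nidl bytes of payload follow */
};

int main(void)
{
	uint8_t data[NVME_IDENTIFY_DATA_SIZE] = {0};
	struct nvme_ns_id_desc *d = (void *)data;
	int pos, len;

	/* Fabricate a single CSI descriptor for the demo. */
	d->nidt = NVME_NIDT_CSI;
	d->nidl = NVME_NIDT_CSI_LEN;
	data[sizeof(*d)] = 0;	/* CSI 0 == NVM command set */

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = (void *)(data + pos);

		if (cur->nidl == 0)	/* zero length terminates the list */
			break;
		if (cur->nidt == NVME_NIDT_CSI)
			printf("CSI descriptor at %d, csi=%u\n",
			       pos, data[pos + sizeof(*cur)]);
		len = sizeof(*cur) + cur->nidl;
	}
	return 0;
}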
@@ -152,6 +199,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 	 */
 	id->mc = 0;
 
+	if (req->sq->ctrl->subsys->clear_ids) {
+		memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+		memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+	}
+
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 out_free:
@@ -176,6 +228,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 		case NVME_ID_CNS_NS:
 			nvmet_passthru_override_id_ns(req);
 			break;
+		case NVME_ID_CNS_NS_DESC_LIST:
+			nvmet_passthru_override_id_descs(req);
+			break;
 		}
 	} else if (status < 0)
 		status = NVME_SC_INTERNAL;
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -405,7 +405,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 	return NVME_SC_INTERNAL;
 }
 
-static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
 		struct nvmet_tcp_cmd *cmd)
 {
 	ahash_request_set_crypt(hash, cmd->req.sg,
@@ -413,23 +413,6 @@ static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
 	crypto_ahash_digest(hash);
 }
 
-static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
-		struct nvmet_tcp_cmd *cmd)
-{
-	struct scatterlist sg;
-	struct kvec *iov;
-	int i;
-
-	crypto_ahash_init(hash);
-	for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
-		sg_init_one(&sg, iov->iov_base, iov->iov_len);
-		ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
-		crypto_ahash_update(hash);
-	}
-	ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
-	crypto_ahash_final(hash);
-}
-
 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
 	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
@@ -454,7 +437,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 
 	if (queue->data_digest) {
 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
-		nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
+		nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
 	}
 
 	if (cmd->queue->hdr_digest) {
@@ -1137,7 +1120,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
 
-	nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
+	nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
 	queue->offset = 0;
 	queue->left = NVME_TCP_DIGEST_LENGTH;
 	queue->rcv_state = NVMET_TCP_RECV_DDGST;
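Net effect of the target/tcp.c hunks: the iov-based nvmet_tcp_recv_ddgst() is deleted and both directions now share the scatterlist-based helper, renamed from nvmet_tcp_send_ddgst() to nvmet_tcp_calc_ddgst() since it no longer serves only the send path. A toy demonstration of why one helper suffices: digesting the same bytes whole or chunk by chunk (as the crypto layer does over a scatterlist) gives the same result. FNV-1a stands in for the real CRC32C data digest:

#include <stdint.h>
#include <stdio.h>

/* Incremental FNV-1a: feed bytes in any chunking, same final hash. */
static uint32_t fnv1a(uint32_t h, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--)
		h = (h ^ *p++) * 16777619u;
	return h;
}

int main(void)
{
	const char payload[] = "nvme/tcp data pdu payload";
	uint32_t whole = fnv1a(2166136261u, payload, sizeof(payload));
	uint32_t chunked = 2166136261u;
	size_t half = sizeof(payload) / 2;

	/* Feed the same bytes as two "sg entries". */
	chunked = fnv1a(chunked, payload, half);
	chunked = fnv1a(chunked, payload + half, sizeof(payload) - half);

	printf("one-shot %08x, chunked %08x, %s\n", whole, chunked,
	       whole == chunked ? "match" : "mismatch");
	return 0;
}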