block-5.19-2022-07-01
Merge tag 'block-5.19-2022-07-01' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Fix for batch getting of tags in sbitmap (wuchi)

 - NVMe pull request via Christoph:
     - More quirks (Lamarque Vieira Souza, Pablo Greco)
     - Fix a fabrics disconnect regression (Ruozhu Li)
     - Fix a nvmet-tcp data_digest calculation regression (Sagi Grimberg)
     - Fix nvme-tcp send failure handling (Sagi Grimberg)
     - Fix a regression with nvmet-loop and passthrough controllers (Alan Adamson)

* tag 'block-5.19-2022-07-01' of git://git.kernel.dk/linux-block:
  nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA IM2P33F8ABR1
  nvmet: add a clear_ids attribute for passthru targets
  nvme: fix regression when disconnect a recovering ctrl
  nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA XPG SX6000LNP (AKA SPECTRIX S40G)
  nvme-tcp: always fail a request when sending it failed
  nvmet-tcp: fix regression in data_digest calculation
  lib/sbitmap: Fix invalid loop in __sbitmap_queue_get_batch()
This commit is contained in commit d516e221e2.
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4595,6 +4595,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 	nvme_stop_failfast_work(ctrl);
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fw_act_work);
+	if (ctrl->ops->stop_ctrl)
+		ctrl->ops->stop_ctrl(ctrl);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
 
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -502,6 +502,7 @@ struct nvme_ctrl_ops {
 	void (*free_ctrl)(struct nvme_ctrl *ctrl);
 	void (*submit_async_event)(struct nvme_ctrl *ctrl);
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
+	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 	void (*print_device_info)(struct nvme_ctrl *ctrl);
 };
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3469,8 +3469,11 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x1b4b, 0x1092),	/* Lexar 256 GB SSD */
 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	{ PCI_DEVICE(0x1cc1, 0x33f8),	/* ADATA IM2P33F8ABR1 1 TB */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x10ec, 0x5762),	/* ADATA SX6000LNP */
-		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+				NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1cc1, 0x8201),	/* ADATA SX8200PNP 512GB */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1048,6 +1048,14 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 	}
 }
 
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+	cancel_work_sync(&ctrl->err_work);
+	cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -2252,9 +2260,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&ctrl->err_work);
-	cancel_delayed_work_sync(&ctrl->reconnect_work);
-
 	nvme_rdma_teardown_io_queues(ctrl, shutdown);
 	nvme_stop_admin_queue(&ctrl->ctrl);
 	if (shutdown)
@@ -2304,6 +2309,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.submit_async_event	= nvme_rdma_submit_async_event,
 	.delete_ctrl		= nvme_rdma_delete_ctrl,
 	.get_address		= nvmf_get_address,
+	.stop_ctrl		= nvme_rdma_stop_ctrl,
 };
 
 /*
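For context on the fabrics disconnect fix: the cancelation of err_work and reconnect_work moves out of the shutdown path and into the new ->stop_ctrl() callback that nvme_stop_ctrl() invokes, so a controller still in recovery can be deleted without racing its own reconnect logic. Below is a minimal userspace sketch of the ordering this enforces, with plain pthreads standing in for kernel workqueues; it is an illustration, not the driver code.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool stop_requested;

/* Stand-in for reconnect_work: keeps retrying until told to stop. */
static void *reconnect_work(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop_requested))
		sched_yield();	/* a real worker would retry connecting here */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, reconnect_work, NULL);

	/* "stop_ctrl" step: signal the recovery worker and wait for it... */
	atomic_store(&stop_requested, true);
	pthread_join(worker, NULL);

	/* ...so tearing down the queues it would touch is now race-free. */
	puts("safe to tear down queues");
	return 0;
}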
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1180,8 +1180,7 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 	} else if (ret < 0) {
 		dev_err(queue->ctrl->ctrl.device,
 			"failed to send request %d\n", ret);
-		if (ret != -EPIPE && ret != -ECONNRESET)
-			nvme_tcp_fail_request(queue->request);
+		nvme_tcp_fail_request(queue->request);
 		nvme_tcp_done_send_req(queue);
 	}
 	return ret;
@@ -2194,9 +2193,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
-	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
-
 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
 	nvme_stop_admin_queue(ctrl);
 	if (shutdown)
@@ -2236,6 +2232,12 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
 	nvme_tcp_reconnect_or_remove(ctrl);
 }
 
+static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+}
+
 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
@@ -2557,6 +2559,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
 	.submit_async_event	= nvme_tcp_submit_async_event,
 	.delete_ctrl		= nvme_tcp_delete_ctrl,
 	.get_address		= nvmf_get_address,
+	.stop_ctrl		= nvme_tcp_stop_ctrl,
 };
 
 static bool
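Note on the nvme_tcp_try_send() hunk: -EPIPE and -ECONNRESET were previously special-cased, leaving the request for error recovery to complete; a request left in that state is never completed if recovery has already run past it, presumably the hang that motivated "nvme-tcp: always fail a request when sending it failed" (rationale inferred from the diff and patch title). With the fix, every send error fails the request immediately.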
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
 }
 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
 
+static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
+}
+
+static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+	unsigned int clear_ids;
+
+	if (kstrtouint(page, 0, &clear_ids))
+		return -EINVAL;
+	subsys->clear_ids = clear_ids;
+	return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
+
 static struct configfs_attribute *nvmet_passthru_attrs[] = {
 	&nvmet_passthru_attr_device_path,
 	&nvmet_passthru_attr_enable,
 	&nvmet_passthru_attr_admin_timeout,
 	&nvmet_passthru_attr_io_timeout,
+	&nvmet_passthru_attr_clear_ids,
 	NULL,
 };
 
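The new attribute appears in configfs next to the existing passthru options. A hypothetical userspace toggle is sketched below; the path assumes the usual nvmet configfs layout under /sys/kernel/config, and "testnqn" is a placeholder subsystem NQN, not something defined by the patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "testnqn" is a placeholder; substitute your subsystem's NQN. */
	const char *path =
	    "/sys/kernel/config/nvmet/subsystems/testnqn/passthru/clear_ids";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* "1" enables ID clearing */
		perror("write");
	close(fd);
	return 0;
}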
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1374,6 +1374,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	ctrl->port = req->port;
 	ctrl->ops = req->ops;
 
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+	/* By default, set loop targets to clear IDS by default */
+	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
+		subsys->clear_ids = 1;
+#endif
+
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -249,6 +249,7 @@ struct nvmet_subsys {
 	struct config_group	passthru_group;
 	unsigned int		admin_timeout;
 	unsigned int		io_timeout;
+	unsigned int		clear_ids;
 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
 
 #ifdef CONFIG_BLK_DEV_ZONED
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,53 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
 		ctrl->cap &= ~(1ULL << 43);
 }
 
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	u16 status = NVME_SC_SUCCESS;
+	int pos, len;
+	bool csi_seen = false;
+	void *data;
+	u8 csi;
+
+	if (!ctrl->subsys->clear_ids)
+		return status;
+
+	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+	if (!data)
+		return NVME_SC_INTERNAL;
+
+	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+	if (status)
+		goto out_free;
+
+	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+		struct nvme_ns_id_desc *cur = data + pos;
+
+		if (cur->nidl == 0)
+			break;
+		if (cur->nidt == NVME_NIDT_CSI) {
+			memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
+			csi_seen = true;
+			break;
+		}
+		len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
+	}
+
+	memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
+	if (csi_seen) {
+		struct nvme_ns_id_desc *cur = data;
+
+		cur->nidt = NVME_NIDT_CSI;
+		cur->nidl = NVME_NIDT_CSI_LEN;
+		memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
+	}
+	status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+out_free:
+	kfree(data);
+	return status;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -152,6 +199,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 	 */
 	id->mc = 0;
 
+	if (req->sq->ctrl->subsys->clear_ids) {
+		memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+		memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+	}
+
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 out_free:
@@ -176,6 +228,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 		case NVME_ID_CNS_NS:
 			nvmet_passthru_override_id_ns(req);
 			break;
+		case NVME_ID_CNS_NS_DESC_LIST:
+			nvmet_passthru_override_id_descs(req);
+			break;
 		}
 	} else if (status < 0)
 		status = NVME_SC_INTERNAL;
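nvmet_passthru_override_id_descs() walks the Identify Namespace Identification Descriptor list: a packed sequence of (type, length, payload) records terminated by a zero-length entry. It preserves only the CSI descriptor and zeroes everything else, so a host cannot match the passthru namespace to the underlying device by NGUID/EUI64/UUID. A standalone sketch of the same walk follows; the struct is my mirror of the kernel's struct nvme_ns_id_desc, and 0x4 is the spec's type code for CSI (NVME_NIDT_CSI).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors struct nvme_ns_id_desc: 4-byte header (type, length, two
 * reserved bytes) followed by nidl payload bytes. */
struct ns_id_desc {
	uint8_t nidt;
	uint8_t nidl;
	uint8_t rsvd[2];
	uint8_t data[];		/* payload, nidl bytes */
};

#define NIDT_CSI	0x4	/* Command Set Identifier descriptor */

/* Returns 1 and stores the CSI byte if a CSI descriptor is found. */
static int find_csi(const uint8_t *buf, size_t len, uint8_t *csi)
{
	size_t pos;

	for (pos = 0; pos + sizeof(struct ns_id_desc) <= len;) {
		const struct ns_id_desc *cur =
			(const struct ns_id_desc *)(buf + pos);

		if (cur->nidl == 0)	/* zero length terminates the list */
			break;
		if (cur->nidt == NIDT_CSI) {
			*csi = cur->data[0];
			return 1;
		}
		pos += sizeof(*cur) + cur->nidl;
	}
	return 0;
}

int main(void)
{
	/* One EUI-64 descriptor (type 1, 8 bytes), then a CSI descriptor. */
	uint8_t list[32] = {
		0x1, 0x8, 0, 0,  1, 2, 3, 4, 5, 6, 7, 8,
		0x4, 0x1, 0, 0,  0x0,	/* CSI 0x0 = NVM command set */
	};
	uint8_t csi;

	if (find_csi(list, sizeof(list), &csi))
		printf("CSI = %u\n", csi);
	return 0;
}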
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -405,7 +405,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 	return NVME_SC_INTERNAL;
 }
 
-static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
 		struct nvmet_tcp_cmd *cmd)
 {
 	ahash_request_set_crypt(hash, cmd->req.sg,
@@ -413,23 +413,6 @@ static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
 	crypto_ahash_digest(hash);
 }
 
-static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
-		struct nvmet_tcp_cmd *cmd)
-{
-	struct scatterlist sg;
-	struct kvec *iov;
-	int i;
-
-	crypto_ahash_init(hash);
-	for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
-		sg_init_one(&sg, iov->iov_base, iov->iov_len);
-		ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
-		crypto_ahash_update(hash);
-	}
-	ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
-	crypto_ahash_final(hash);
-}
-
 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
 	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
@@ -454,7 +437,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 
 	if (queue->data_digest) {
 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
-		nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
+		nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
 	}
 
 	if (cmd->queue->hdr_digest) {
@@ -1137,7 +1120,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
 
-	nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
+	nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
 	queue->offset = 0;
 	queue->left = NVME_TCP_DIGEST_LENGTH;
 	queue->rcv_state = NVMET_TCP_RECV_DDGST;
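Both directions now share nvmet_tcp_calc_ddgst(), which hashes cmd->req.sg directly instead of the receive path re-walking cmd->iov, the apparent source of the data_digest regression. Unifying the two relies on a basic property of chained digests: hashing consecutive chunks incrementally yields the same result as hashing the whole buffer. The small demonstration below uses zlib's crc32 as a stand-in; the real NVMe/TCP DDGST is CRC32C (Castagnoli), so the polynomial differs, but the chaining property is the same.

/* Build with: cc demo.c -lz */
#include <assert.h>
#include <zlib.h>

int main(void)
{
	const unsigned char data[] = "nvme/tcp data digest example";

	/* Digest of the whole buffer in one call... */
	uLong whole = crc32(0L, data, sizeof(data));

	/* ...equals the digest chained across two chunks. */
	uLong part = crc32(0L, data, 10);
	part = crc32(part, data + 10, sizeof(data) - 10);

	assert(whole == part);
	return 0;
}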
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -528,7 +528,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 
 		sbitmap_deferred_clear(map);
 		if (map->word == (1UL << (map_depth - 1)) - 1)
-			continue;
+			goto next;
 
 		nr = find_first_zero_bit(&map->word, map_depth);
 		if (nr + nr_tags <= map_depth) {
@@ -539,6 +539,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 			get_mask = ((1UL << map_tags) - 1) << nr;
 			do {
 				val = READ_ONCE(map->word);
+				if ((val & ~get_mask) != val)
+					goto next;
 				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
 			} while (ret != val);
 			get_mask = (get_mask & ~ret) >> nr;
@@ -549,6 +551,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 				return get_mask;
 			}
 		}
+next:
 		/* Jump to next index. */
 		if (++index >= sb->map_nr)
 			index = 0;
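The sbitmap fix addresses two problems in __sbitmap_queue_get_batch(): "continue" skipped the index-advance code at the bottom of the loop body (now reached via the new "next:" label), and the cmpxchg loop could spin indefinitely on bits another CPU had already taken. The added check bails to the next word unless every requested bit is still clear. A userspace sketch of that recheck-or-move-on pattern using C11 atomics, as an illustration rather than the kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Try to claim all bits in get_mask atomically; if any is already
 * taken, give up so the caller can advance to the next word instead
 * of retrying the same word forever. */
static bool grab_batch(_Atomic unsigned long *word, unsigned long get_mask)
{
	unsigned long val = atomic_load(word);

	do {
		if ((val & get_mask) != 0)	/* some bit already taken */
			return false;		/* caller moves to next word */
	} while (!atomic_compare_exchange_weak(word, &val, val | get_mask));

	return true;
}

int main(void)
{
	_Atomic unsigned long w = 0x0fUL;	 /* low four bits taken */

	printf("%d\n", grab_batch(&w, 0xf0UL));	 /* 1: bits 4..7 free   */
	printf("%d\n", grab_batch(&w, 0x18UL));	 /* 0: overlaps taken   */
	return 0;
}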