Merge tag 'nvme-5.15-2021-09-15' of git://git.infradead.org/nvme into block-5.15

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 5.15

 - fix ANA state updates when a namespace is not present (Anton Eidelman)
 - nvmet: fix a width vs precision bug in nvmet_subsys_attr_serial_show
   (Dan Carpenter)
 - avoid race in shutdown namespace removal (Daniel Wagner)
 - fix io_work priority inversion in nvme-tcp (Keith Busch)
 - destroy cm id before destroy qp to avoid use after free (Ruozhu Li)"

* tag 'nvme-5.15-2021-09-15' of git://git.infradead.org/nvme:
  nvme-tcp: fix io_work priority inversion
  nvme-rdma: destroy cm id before destroy qp to avoid use after free
  nvme-multipath: fix ANA state updates when a namespace is not present
  nvme: avoid race in shutdown namespace removal
  nvmet: fix a width vs precision bug in nvmet_subsys_attr_serial_show()
commit 65ed1e692f
drivers/nvme/host/core.c

@@ -3524,7 +3524,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
 	lockdep_assert_held(&subsys->lock);
 
 	list_for_each_entry(h, &subsys->nsheads, entry) {
-		if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+		if (h->ns_id != nsid)
+			continue;
+		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
 			return h;
 	}
 
@@ -3843,6 +3845,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
 	mutex_lock(&ns->ctrl->subsys->lock);
 	list_del_rcu(&ns->siblings);
+	if (list_empty(&ns->head->list)) {
+		list_del_init(&ns->head->entry);
+		last_path = true;
+	}
 	mutex_unlock(&ns->ctrl->subsys->lock);
 
 	/* guarantee not available in head->list */
@@ -3861,13 +3867,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	list_del_init(&ns->list);
 	up_write(&ns->ctrl->namespaces_rwsem);
 
-	/* Synchronize with nvme_init_ns_head() */
-	mutex_lock(&ns->head->subsys->lock);
-	if (list_empty(&ns->head->list)) {
-		list_del_init(&ns->head->entry);
-		last_path = true;
-	}
-	mutex_unlock(&ns->head->subsys->lock);
 	if (last_path)
 		nvme_mpath_shutdown_disk(ns->head);
 	nvme_put_ns(ns);
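For readers skimming the diff, a minimal user-space sketch of the reworked nvme_find_ns_head() rule follows. The idea: a head that is still on the subsystem list but whose own namespace list is already empty is mid-teardown, so the lookup must not hand it back out. The toy_head type, the find_head() helper and the values in main() are invented for illustration; only the skip-when-empty check mirrors the kernel change.

#include <stdio.h>

/* Toy stand-ins for the kernel structures, illustration only. */
struct toy_head {
	unsigned ns_id;
	int nr_namespaces;	/* stand-in for !list_empty(&h->list) */
	int refs;		/* stand-in for nvme_tryget_ns_head() */
};

static struct toy_head *find_head(struct toy_head *heads, int n, unsigned nsid)
{
	for (int i = 0; i < n; i++) {
		struct toy_head *h = &heads[i];

		if (h->ns_id != nsid)
			continue;
		/* new rule: a head with no namespaces left is being removed */
		if (h->nr_namespaces > 0 && h->refs > 0) {
			h->refs++;	/* "tryget" succeeded */
			return h;
		}
	}
	return NULL;
}

int main(void)
{
	struct toy_head heads[] = {
		{ .ns_id = 1, .nr_namespaces = 0, .refs = 1 },	/* mid-teardown */
		{ .ns_id = 2, .nr_namespaces = 1, .refs = 1 },
	};

	printf("nsid 1 -> %s\n", find_head(heads, 2, 1) ? "reused" : "not reused");
	printf("nsid 2 -> %s\n", find_head(heads, 2, 2) ? "reused" : "not reused");
	return 0;
}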
drivers/nvme/host/multipath.c

@@ -600,14 +600,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 
 	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+		unsigned nsid;
+again:
+		nsid = le32_to_cpu(desc->nsids[n]);
 		if (ns->head->ns_id < nsid)
 			continue;
 		if (ns->head->ns_id == nsid)
 			nvme_update_ns_ana_state(desc, ns);
 		if (++n == nr_nsids)
 			break;
+		if (ns->head->ns_id > nsid)
+			goto again;
 	}
 	up_read(&ctrl->namespaces_rwsem);
 	return 0;
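The multipath fix is easiest to see with the two sorted lists written out. Below is a small, self-contained simulation (plain arrays and made-up NSIDs instead of the controller's namespace list and the ANA log descriptor): when a descriptor NSID has no matching namespace, the old loop moved on to the next namespace without re-checking it, so later namespaces kept a stale ANA state; the goto again retry re-evaluates the same namespace against the next NSID.

#include <stdio.h>

static void update_ana_state(const unsigned *ns_ids, int nr_ns,
			     const unsigned *nsids, int nr_nsids)
{
	int n = 0;

	for (int i = 0; i < nr_ns; i++) {
		unsigned nsid;
again:
		nsid = nsids[n];
		if (ns_ids[i] < nsid)
			continue;
		if (ns_ids[i] == nsid)
			printf("update ANA state of nsid %u\n", ns_ids[i]);
		if (++n == nr_nsids)
			break;
		if (ns_ids[i] > nsid)
			goto again;	/* descriptor NSID not present: retry */
	}
}

int main(void)
{
	/* namespace 2 is not present; without the retry, nsid 3 was skipped */
	const unsigned ns_ids[] = { 1, 3 };
	const unsigned nsids[]  = { 1, 2, 3 };

	update_ana_state(ns_ids, 2, nsids, 3);
	return 0;
}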
drivers/nvme/host/rdma.c

@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 	if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
 		return;
 
-	nvme_rdma_destroy_queue_ib(queue);
 	rdma_destroy_id(queue->cm_id);
+	nvme_rdma_destroy_queue_ib(queue);
 	mutex_destroy(&queue->queue_lock);
 }
 
@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
 	for (i = 0; i < queue->queue_size; i++) {
 		ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
 		if (ret)
-			goto out_destroy_queue_ib;
+			return ret;
 	}
 
 	return 0;
-
-out_destroy_queue_ib:
-	nvme_rdma_destroy_queue_ib(queue);
-	return ret;
 }
 
 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 	if (ret) {
 		dev_err(ctrl->ctrl.device,
 			"rdma_connect_locked failed (%d).\n", ret);
-		goto out_destroy_queue_ib;
+		return ret;
 	}
 
 	return 0;
-
-out_destroy_queue_ib:
-	nvme_rdma_destroy_queue_ib(queue);
-	return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
-		nvme_rdma_destroy_queue_ib(queue);
-		fallthrough;
 	case RDMA_CM_EVENT_ADDR_ERROR:
 		dev_dbg(queue->ctrl->ctrl.device,
 			"CM error event %d\n", ev->event);
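The rdma.c hunks all enforce one ordering rule: tear down the CM ID, which can still deliver events, before the queue resources those events may touch. A generic user-space illustration is below; none of the types or calls are the RDMA CM API, they only model an event source that keeps a pointer into per-queue state.

#include <stdio.h>
#include <stdlib.h>

struct resources {
	int qp;				/* stand-in for the queue pair */
};

struct event_source {
	struct resources *res;		/* what a late event would touch */
};

static void late_event(struct event_source *src)
{
	if (src->res)			/* still registered: touches the resources */
		printf("event touched qp %d\n", src->res->qp);
	else
		printf("event ignored, source already destroyed\n");
}

int main(void)
{
	struct resources *res = malloc(sizeof(*res));
	struct event_source src = { .res = res };

	res->qp = 42;

	/* fixed order: silence the event source first ... */
	src.res = NULL;			/* roughly rdma_destroy_id() */
	late_event(&src);		/* a straggling event is now harmless */
	free(res);			/* ... then free the resources (the QP) */
	return 0;
}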
drivers/nvme/host/tcp.c

@@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
 	} while (ret > 0);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+	return !list_empty(&queue->send_list) ||
+		!llist_empty(&queue->req_list) || queue->more_requests;
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		bool sync, bool last)
 {
@@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		nvme_tcp_send_all(queue);
 		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
-	} else if (last) {
-		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 	}
+
+	if (last && nvme_tcp_queue_more(queue))
+		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
 
 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
@@ -906,12 +913,6 @@ static void nvme_tcp_state_change(struct sock *sk)
 	read_unlock_bh(&sk->sk_callback_lock);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
-	return !list_empty(&queue->send_list) ||
-		!llist_empty(&queue->req_list) || queue->more_requests;
-}
-
 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 {
 	queue->request = NULL;
@@ -1145,8 +1146,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
 				pending = true;
 			else if (unlikely(result < 0))
 				break;
-		} else
-			pending = !llist_empty(&queue->req_list);
+		}
 
 		result = nvme_tcp_try_recv(queue);
 		if (result > 0)
drivers/nvme/target/configfs.c

@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
 {
	struct nvmet_subsys *subsys = to_subsys(item);
 
-	return snprintf(page, PAGE_SIZE, "%*s\n",
+	return snprintf(page, PAGE_SIZE, "%.*s\n",
 			NVMET_SN_MAX_SIZE, subsys->serial);
 }
 
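The configfs change is a one-character fix, but the two formats behave very differently: %*s takes its argument as a minimum field width (it pads, never truncates), while %.*s takes it as a precision (print at most that many characters). The subsystem serial number is a fixed 20-byte field that need not be NUL terminated, so the width form could read past the buffer. A standalone sketch, with 20 standing in for NVMET_SN_MAX_SIZE:

#include <stdio.h>

int main(void)
{
	char serial[20] = "0123456789abcdefghij";	/* 20 bytes, no NUL terminator */
	char page[64];

	/* old format: "%*s" is a field width only; it would keep reading
	 * serial[] until it happens to find a NUL past the end of the array:
	 *   snprintf(page, sizeof(page), "%*s\n", 20, serial);
	 */

	/* fixed format: "%.*s" is a precision, so at most 20 bytes are read */
	snprintf(page, sizeof(page), "%.*s\n", 20, serial);
	fputs(page, stdout);
	return 0;
}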