Merge branches 'misc', 'qedr', 'reject-helpers', 'rxe' and 'srp' into merge-test
commit 9032ad78bb
@@ -6376,10 +6376,7 @@ F: drivers/net/ethernet/intel/*/

INTEL RDMA RNIC DRIVER
M: Faisal Latif <faisal.latif@intel.com>
R: Chien Tin Tung <chien.tin.tung@intel.com>
R: Mustafa Ismail <mustafa.ismail@intel.com>
R: Shiraz Saleem <shiraz.saleem@intel.com>
R: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
M: Shiraz Saleem <shiraz.saleem@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/i40iw/
@@ -10901,7 +10898,6 @@ F: drivers/net/ethernet/emulex/benet/
EMULEX ONECONNECT ROCE DRIVER
M: Selvin Xavier <selvin.xavier@avagotech.com>
M: Devesh Sharma <devesh.sharma@avagotech.com>
M: Mitesh Ahuja <mitesh.ahuja@avagotech.com>
L: linux-rdma@vger.kernel.org
W: http://www.emulex.com
S: Supported

@@ -57,6 +57,54 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
[IB_CM_REJ_NO_EEC] = "no EEC",
[IB_CM_REJ_NO_RESOURCES] = "no resources",
[IB_CM_REJ_TIMEOUT] = "timeout",
[IB_CM_REJ_UNSUPPORTED] = "unsupported",
[IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
[IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
[IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
[IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
[IB_CM_REJ_STALE_CONN] = "stale conn",
[IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
[IB_CM_REJ_INVALID_GID] = "invalid GID",
[IB_CM_REJ_INVALID_LID] = "invalid LID",
[IB_CM_REJ_INVALID_SL] = "invalid SL",
[IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
[IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
[IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
[IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
[IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
[IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
[IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
[IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
[IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
[IB_CM_REJ_PORT_REDIRECT] = "port redirect",
[IB_CM_REJ_INVALID_MTU] = "invalid MTU",
[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
[IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
[IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
size_t index = reason;

if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
ibcm_rej_reason_strs[index])
return ibcm_rej_reason_strs[index];
else
return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

@@ -1519,6 +1567,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
struct cm_req_msg *req_msg;
struct ib_cm_id *cm_id;

req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

@@ -1540,10 +1589,18 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
timewait_info->work.remote_id);

spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
if (cur_cm_id_priv) {
cm_id = &cur_cm_id_priv->id;
ib_send_cm_dreq(cm_id, NULL, 0);
cm_deref_id(cur_cm_id_priv);
}
return NULL;
}
@@ -1919,6 +1976,9 @@ static int cm_rep_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
int ret;
struct cm_id_private *cur_cm_id_priv;
struct ib_cm_id *cm_id;
struct cm_timewait_info *timewait_info;

rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
@@ -1953,16 +2013,26 @@ static int cm_rep_handler(struct cm_work *work)
goto error;
}
/* Check for a stale connection. */
if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
rb_erase(&cm_id_priv->timewait_info->remote_id_node,
&cm.remote_id_table);
cm_id_priv->timewait_info->inserted_remote_id = 0;
cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
timewait_info->work.remote_id);

spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
if (cur_cm_id_priv) {
cm_id = &cur_cm_id_priv->id;
ib_send_cm_dreq(cm_id, NULL, 0);
cm_deref_id(cur_cm_id_priv);
}

goto error;
}
spin_unlock(&cm.lock);

@@ -101,6 +101,49 @@ const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
}
EXPORT_SYMBOL(rdma_event_msg);

const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
int reason)
{
if (rdma_ib_or_roce(id->device, id->port_num))
return ibcm_reject_msg(reason);

if (rdma_protocol_iwarp(id->device, id->port_num))
return iwcm_reject_msg(reason);

WARN_ON_ONCE(1);
return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);

bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
if (rdma_ib_or_roce(id->device, id->port_num))
return reason == IB_CM_REJ_CONSUMER_DEFINED;

if (rdma_protocol_iwarp(id->device, id->port_num))
return reason == -ECONNREFUSED;

WARN_ON_ONCE(1);
return false;
}
EXPORT_SYMBOL(rdma_is_consumer_reject);

const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len)
{
const void *p;

if (rdma_is_consumer_reject(id, ev->status)) {
*data_len = ev->param.conn.private_data_len;
p = ev->param.conn.private_data;
} else {
*data_len = 0;
p = NULL;
}
return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

@@ -59,6 +59,27 @@ MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const iwcm_rej_reason_strs[] = {
[ECONNRESET] = "reset by remote host",
[ECONNREFUSED] = "refused by remote application",
[ETIMEDOUT] = "setup timeout",
};

const char *__attribute_const__ iwcm_reject_msg(int reason)
{
size_t index;

/* iWARP uses negative errnos */
index = -reason;

if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
iwcm_rej_reason_strs[index])
return iwcm_rej_reason_strs[index];
else
return "unrecognized reason";
}
EXPORT_SYMBOL(iwcm_reject_msg);

static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},

@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
* If we are at the start of the LID routed part, don't update the
* hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
*/
if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
u32 opa_drslid;

if ((opa_get_smp_direction(opa_smp)
@@ -1728,7 +1728,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
if (!class)
goto out;
if (convert_mgmt_class(mad_hdr->mgmt_class) >=
IB_MGMT_MAX_METHODS)
ARRAY_SIZE(class->method_table))
goto out;
method = class->method_table[convert_mgmt_class(
mad_hdr->mgmt_class)];
@@ -2149,7 +2149,7 @@ handle_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
mad_hdr->class_version == OPA_SM_CLASS_VERSION)
return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
response);

@@ -518,8 +518,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
process_join_error(group, status);
else {
int mgids_changed, is_mgid0;
ib_find_pkey(group->port->dev->device, group->port->port_num,
be16_to_cpu(rec->pkey), &pkey_index);

if (ib_find_pkey(group->port->dev->device,
group->port->port_num, be16_to_cpu(rec->pkey),
&pkey_index))
pkey_index = MCAST_INVALID_PKEY_INDEX;

spin_lock_irq(&group->port->lock);
if (group->state == MCAST_BUSY &&

@@ -51,7 +51,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d

if (umem->nmap > 0)
ib_dma_unmap_sg(dev, umem->sg_head.sgl,
umem->nmap,
umem->npages,
DMA_BIDIRECTIONAL);

for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {

@@ -128,7 +128,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
smp = send_buf->mad;
smp->base_version = OPA_MGMT_BASE_VERSION;
smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
smp->class_version = OPA_SMI_CLASS_VERSION;
smp->class_version = OPA_SM_CLASS_VERSION;
smp->method = IB_MGMT_METHOD_TRAP;
ibp->rvp.tid++;
smp->tid = cpu_to_be64(ibp->rvp.tid);
@@ -344,7 +344,7 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,

ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
ni->base_version = OPA_MGMT_BASE_VERSION;
ni->class_version = OPA_SMI_CLASS_VERSION;
ni->class_version = OPA_SM_CLASS_VERSION;
ni->node_type = 1; /* channel adapter */
ni->num_ports = ibdev->phys_port_cnt;
/* This is already in network order */
@@ -381,7 +381,7 @@ static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,

nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
nip->base_version = OPA_MGMT_BASE_VERSION;
nip->class_version = OPA_SMI_CLASS_VERSION;
nip->class_version = OPA_SM_CLASS_VERSION;
nip->node_type = 1; /* channel adapter */
nip->num_ports = ibdev->phys_port_cnt;
/* This is already in network order */
@@ -2303,7 +2303,7 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

p->base_version = OPA_MGMT_BASE_VERSION;
p->class_version = OPA_SMI_CLASS_VERSION;
p->class_version = OPA_SM_CLASS_VERSION;
/*
* Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
*/
@@ -4023,7 +4023,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,

am = be32_to_cpu(smp->attr_mod);
attr_id = smp->attr_id;
if (smp->class_version != OPA_SMI_CLASS_VERSION) {
if (smp->class_version != OPA_SM_CLASS_VERSION) {
smp->status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_mad_hdr *)smp);
return ret;
@@ -4233,7 +4233,7 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port,

*out_mad = *in_mad;

if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) {
pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
return reply((struct ib_mad_hdr *)pmp);
}

@@ -430,7 +430,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
int err;
int have_ib_ports;
struct mlx4_uverbs_ex_query_device cmd;
struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
@@ -455,6 +455,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(resp.response_length);
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
err = -ENOMEM;
if (!in_mad || !out_mad)
goto out;

@@ -662,10 +662,14 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
nesnic->sq_head &= nesnic->sq_size-1;
}
} else {
nesvnic->linearized_skbs++;
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
skb_linearize(skb);
if (skb_linearize(skb)) {
nesvnic->tx_sw_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
nesvnic->linearized_skbs++;
skb_set_transport_header(skb, hoffset);
skb_set_network_header(skb, nhoffset);
if (!nes_nic_send(skb, netdev))
@@ -1465,7 +1469,8 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
/**
* nes_netdev_get_settings
*/
static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
static int nes_netdev_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -1474,54 +1479,59 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
u8 phy_type = nesadapter->phy_type[mac_index];
u8 phy_index = nesadapter->phy_index[mac_index];
u16 phy_data;
u32 supported, advertising;

et_cmd->duplex = DUPLEX_FULL;
et_cmd->port = PORT_MII;
et_cmd->maxtxpkt = 511;
et_cmd->maxrxpkt = 511;
cmd->base.duplex = DUPLEX_FULL;
cmd->base.port = PORT_MII;

if (nesadapter->OneG_Mode) {
ethtool_cmd_speed_set(et_cmd, SPEED_1000);
cmd->base.speed = SPEED_1000;
if (phy_type == NES_PHY_TYPE_PUMA_1G) {
et_cmd->supported = SUPPORTED_1000baseT_Full;
et_cmd->advertising = ADVERTISED_1000baseT_Full;
et_cmd->autoneg = AUTONEG_DISABLE;
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->phy_address = mac_index;
supported = SUPPORTED_1000baseT_Full;
advertising = ADVERTISED_1000baseT_Full;
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.phy_address = mac_index;
} else {
unsigned long flags;
et_cmd->supported = SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg;
et_cmd->advertising = ADVERTISED_1000baseT_Full
| ADVERTISED_Autoneg;

supported = SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg;
advertising = ADVERTISED_1000baseT_Full
| ADVERTISED_Autoneg;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x1000)
et_cmd->autoneg = AUTONEG_ENABLE;
cmd->base.autoneg = AUTONEG_ENABLE;
else
et_cmd->autoneg = AUTONEG_DISABLE;
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->phy_address = phy_index;
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.phy_address = phy_index;
}
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported, supported);
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.advertising, advertising);
return 0;
}
if ((phy_type == NES_PHY_TYPE_ARGUS) ||
(phy_type == NES_PHY_TYPE_SFP_D) ||
(phy_type == NES_PHY_TYPE_KR)) {
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->port = PORT_FIBRE;
et_cmd->supported = SUPPORTED_FIBRE;
et_cmd->advertising = ADVERTISED_FIBRE;
et_cmd->phy_address = phy_index;
cmd->base.port = PORT_FIBRE;
supported = SUPPORTED_FIBRE;
advertising = ADVERTISED_FIBRE;
cmd->base.phy_address = phy_index;
} else {
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->supported = SUPPORTED_10000baseT_Full;
et_cmd->advertising = ADVERTISED_10000baseT_Full;
et_cmd->phy_address = mac_index;
supported = SUPPORTED_10000baseT_Full;
advertising = ADVERTISED_10000baseT_Full;
cmd->base.phy_address = mac_index;
}
ethtool_cmd_speed_set(et_cmd, SPEED_10000);
et_cmd->autoneg = AUTONEG_DISABLE;
cmd->base.speed = SPEED_10000;
cmd->base.autoneg = AUTONEG_DISABLE;
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising);

return 0;
}

@@ -1529,7 +1539,9 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
/**
* nes_netdev_set_settings
*/
static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
static int
nes_netdev_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -1543,7 +1555,7 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd

spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
if (et_cmd->autoneg) {
if (cmd->base.autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
} else {
@@ -1560,8 +1572,6 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd

static const struct ethtool_ops nes_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = nes_netdev_get_settings,
.set_settings = nes_netdev_set_settings,
.get_strings = nes_netdev_get_strings,
.get_sset_count = nes_netdev_get_sset_count,
.get_ethtool_stats = nes_netdev_get_ethtool_stats,
@@ -1570,6 +1580,8 @@ static const struct ethtool_ops nes_ethtool_ops = {
.set_coalesce = nes_netdev_set_coalesce,
.get_pauseparam = nes_netdev_get_pauseparam,
.set_pauseparam = nes_netdev_set_pauseparam,
.get_link_ksettings = nes_netdev_get_link_ksettings,
.set_link_ksettings = nes_netdev_set_link_ksettings,
};

static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)

@@ -1641,7 +1641,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
int i;
int status = 0;
int status = -ENOMEM;
int max_ah;
struct ocrdma_create_ah_tbl *cmd;
struct ocrdma_create_ah_tbl_rsp *rsp;

@@ -511,8 +511,10 @@ int qedr_dealloc_pd(struct ib_pd *ibpd)
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_pd *pd = get_qedr_pd(ibpd);

if (!pd)
if (!pd) {
pr_err("Invalid PD received in dealloc_pd\n");
return -EINVAL;
}

DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
@@ -1477,6 +1479,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
struct qedr_qp *qp;
struct ib_qp *ibqp;
int rc = 0;

DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
@@ -1486,13 +1489,13 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
if (rc)
return ERR_PTR(rc);

if (attrs->srq)
return ERR_PTR(-EINVAL);

qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);

if (attrs->srq)
return ERR_PTR(-EINVAL);

DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
get_qedr_cq(attrs->send_cq),
@@ -1508,7 +1511,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
"create qp: unexpected udata when creating GSI QP\n");
goto err0;
}
return qedr_create_gsi_qp(dev, attrs, qp);
ibqp = qedr_create_gsi_qp(dev, attrs, qp);
if (IS_ERR(ibqp))
kfree(qp);
return ibqp;
}

memset(&in_params, 0, sizeof(in_params));
@@ -2414,8 +2420,7 @@ static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
*/
pbl = list_first_entry(&info->inuse_pbl_list,
struct qedr_pbl, list_entry);
list_del(&pbl->list_entry);
list_add_tail(&pbl->list_entry, &info->free_pbl_list);
list_move_tail(&pbl->list_entry, &info->free_pbl_list);
info->completed_handled++;
}
}
@@ -2982,11 +2987,6 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return -EINVAL;
}

if (!wr) {
DP_ERR(dev, "Got an empty post send.\n");
return -EINVAL;
}

while (wr) {
rc = __qedr_post_send(ibqp, wr, bad_wr);
if (rc)

@@ -117,10 +117,10 @@ static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

res_chunk = get_qp_res_chunk(qp_grp);
if (IS_ERR_OR_NULL(res_chunk)) {
if (IS_ERR(res_chunk)) {
usnic_err("Unable to get qp res with err %ld\n",
PTR_ERR(res_chunk));
return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
return PTR_ERR(res_chunk);
}

for (i = 0; i < res_chunk->cnt; i++) {
@@ -158,10 +158,10 @@ static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

res_chunk = get_qp_res_chunk(qp_grp);
if (IS_ERR_OR_NULL(res_chunk)) {
if (IS_ERR(res_chunk)) {
usnic_err("Unable to get qp res with err %ld\n",
PTR_ERR(res_chunk));
return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
return PTR_ERR(res_chunk);
}

for (i = 0; i < res_chunk->cnt; i++) {
@@ -186,11 +186,11 @@ static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
struct usnic_vnic_res_chunk *res_chunk;

res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
if (IS_ERR_OR_NULL(res_chunk)) {
if (IS_ERR(res_chunk)) {
usnic_err("Unable to get %s with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
PTR_ERR(res_chunk));
return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
return PTR_ERR(res_chunk);
}

uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

@@ -87,12 +87,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.bar_len = bar->len;

chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
if (IS_ERR_OR_NULL(chunk)) {
if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
qp_grp->grp_id,
PTR_ERR(chunk));
return chunk ? PTR_ERR(chunk) : -ENOMEM;
return PTR_ERR(chunk);
}

WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
@@ -101,12 +101,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.rq_idx[i] = chunk->res[i]->vnic_idx;

chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
if (IS_ERR_OR_NULL(chunk)) {
if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
qp_grp->grp_id,
PTR_ERR(chunk));
return chunk ? PTR_ERR(chunk) : -ENOMEM;
return PTR_ERR(chunk);
}

WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
@@ -115,12 +115,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.wq_idx[i] = chunk->res[i]->vnic_idx;

chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
if (IS_ERR_OR_NULL(chunk)) {
if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
qp_grp->grp_id,
PTR_ERR(chunk));
return chunk ? PTR_ERR(chunk) : -ENOMEM;
return PTR_ERR(chunk);
}

WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);

@@ -420,11 +420,12 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
(qp->req.state == QP_STATE_ERROR)) {
make_send_cqe(qp, wqe, &cqe);
advance_consumer(qp->sq.queue);
rxe_cq_post(qp->scq, &cqe, 0);
} else {
advance_consumer(qp->sq.queue);
}

advance_consumer(qp->sq.queue);

/*
* we completed something so let req run again
* if it is trying to fence
@@ -510,6 +511,8 @@ int rxe_completer(void *arg)
struct rxe_pkt_info *pkt = NULL;
enum comp_state state;

rxe_add_ref(qp);

if (!qp->valid) {
while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_drop_ref(qp);
@@ -739,11 +742,13 @@ int rxe_completer(void *arg)
/* we come here if we are done with processing and want the task to
* exit from the loop calling us
*/
rxe_drop_ref(qp);
return -EAGAIN;

done:
/* we come here if we have processed a packet we want the task to call
* us again to see if there is anything else to do
*/
rxe_drop_ref(qp);
return 0;
}

@@ -266,8 +266,6 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
}

atomic_inc(&qp->skb_out);

if ((qp_type(qp) != IB_QPT_RC) &&
(pkt->mask & RXE_END_MASK)) {
pkt->wqe->state = wqe_state_done;

@@ -355,6 +355,9 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
size_t offset;
u32 crc = crcp ? (*crcp) : 0;

if (length == 0)
return 0;

if (mem->type == RXE_MEM_TYPE_DMA) {
u8 *src, *dest;

@@ -46,7 +46,7 @@
#include "rxe_loc.h"

static LIST_HEAD(rxe_dev_list);
static spinlock_t dev_list_lock; /* spinlock for device list */
static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */

struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
@@ -459,6 +459,8 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return -EAGAIN;
}

if (pkt->qp)
atomic_inc(&pkt->qp->skb_out);
kfree_skb(skb);

return 0;
@@ -663,8 +665,6 @@ struct notifier_block rxe_net_notifier = {

int rxe_net_ipv4_init(void)
{
spin_lock_init(&dev_list_lock);

recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
htons(ROCE_V2_UDP_DPORT), false);
if (IS_ERR(recv_sockets.sk4)) {
@@ -680,8 +680,6 @@ int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)

spin_lock_init(&dev_list_lock);

recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
htons(ROCE_V2_UDP_DPORT), true);
if (IS_ERR(recv_sockets.sk6)) {

@@ -391,16 +391,15 @@ int rxe_rcv(struct sk_buff *skb)
payload_size(pkt));
calc_icrc = cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
char saddr[sizeof(struct in6_addr)];

if (skb->protocol == htons(ETH_P_IPV6))
sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr);
pr_warn_ratelimited("bad ICRC from %pI6c\n",
&ipv6_hdr(skb)->saddr);
else if (skb->protocol == htons(ETH_P_IP))
sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr);
pr_warn_ratelimited("bad ICRC from %pI4\n",
&ip_hdr(skb)->saddr);
else
sprintf(saddr, "unknown");
pr_warn_ratelimited("bad ICRC from unknown\n");

pr_warn_ratelimited("bad ICRC from %s\n", saddr);
goto drop;
}

@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
static void save_state(struct rxe_send_wqe *wqe,
struct rxe_qp *qp,
struct rxe_send_wqe *rollback_wqe,
struct rxe_qp *rollback_qp)
u32 *rollback_psn)
{
rollback_wqe->state = wqe->state;
rollback_wqe->first_psn = wqe->first_psn;
rollback_wqe->last_psn = wqe->last_psn;
rollback_qp->req.psn = qp->req.psn;
*rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
struct rxe_qp *qp,
struct rxe_send_wqe *rollback_wqe,
struct rxe_qp *rollback_qp)
u32 rollback_psn)
{
wqe->state = rollback_wqe->state;
wqe->first_psn = rollback_wqe->first_psn;
wqe->last_psn = rollback_wqe->last_psn;
qp->req.psn = rollback_qp->req.psn;
qp->req.psn = rollback_psn;
}

static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
@@ -593,8 +593,10 @@ int rxe_requester(void *arg)
int mtu;
int opcode;
int ret;
struct rxe_qp rollback_qp;
struct rxe_send_wqe rollback_wqe;
u32 rollback_psn;

rxe_add_ref(qp);

next_wqe:
if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -718,7 +720,7 @@ int rxe_requester(void *arg)
* rxe_xmit_packet().
* Otherwise, completer might initiate an unjustified retry flow.
*/
save_state(wqe, qp, &rollback_wqe, &rollback_qp);
save_state(wqe, qp, &rollback_wqe, &rollback_psn);
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
@@ -726,7 +728,7 @@ int rxe_requester(void *arg)
qp->need_req_skb = 1;
kfree_skb(skb);

rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

if (ret == -EAGAIN) {
rxe_run_task(&qp->req.task, 1);
@@ -750,9 +752,10 @@ int rxe_requester(void *arg)
while (rxe_completer(qp) == 0)
;
}

rxe_drop_ref(qp);
return 0;

exit:
rxe_drop_ref(qp);
return -EAGAIN;
}

@@ -444,6 +444,13 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
return RESPST_EXECUTE;
}

/* A zero-byte op is not required to set an addr or rkey. */
if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
(pkt->mask & RXE_RETH_MASK) &&
reth_len(pkt) == 0) {
return RESPST_EXECUTE;
}

va = qp->resp.va;
rkey = qp->resp.rkey;
resid = qp->resp.resid;
@@ -680,9 +687,14 @@ static enum resp_states read_reply(struct rxe_qp *qp,
res->read.va_org = qp->resp.va;

res->first_psn = req_pkt->psn;
res->last_psn = req_pkt->psn +
(reth_len(req_pkt) + mtu - 1) /
mtu - 1;

if (reth_len(req_pkt)) {
res->last_psn = (req_pkt->psn +
(reth_len(req_pkt) + mtu - 1) /
mtu - 1) & BTH_PSN_MASK;
} else {
res->last_psn = res->first_psn;
}
res->cur_psn = req_pkt->psn;

res->read.resid = qp->resp.resid;
@@ -742,7 +754,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
} else {
qp->resp.res = NULL;
qp->resp.opcode = -1;
qp->resp.psn = res->cur_psn;
if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
qp->resp.psn = res->cur_psn;
state = RESPST_CLEANUP;
}

@@ -1132,6 +1145,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
pkt, skb_copy);
if (rc) {
pr_err("Failed resending result. This flow is not handled - skb ignored\n");
rxe_drop_ref(qp);
kfree_skb(skb_copy);
rc = RESPST_CLEANUP;
goto out;
@@ -1198,6 +1212,8 @@ int rxe_responder(void *arg)
struct rxe_pkt_info *pkt = NULL;
int ret = 0;

rxe_add_ref(qp);

qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

if (!qp->valid) {
@@ -1386,5 +1402,6 @@ int rxe_responder(void *arg)
exit:
ret = -EAGAIN;
done:
rxe_drop_ref(qp);
return ret;
}

@@ -169,7 +169,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
}
}

err = rxe_queue_resize(q, (unsigned int *)&attr->max_wr,
err = rxe_queue_resize(q, &attr->max_wr,
rcv_wqe_size(srq->rq.max_sge),
srq->rq.queue->ip ?
srq->rq.queue->ip->context :

@@ -121,6 +121,7 @@ int rxe_init_task(void *obj, struct rxe_task *task,
task->arg = arg;
task->func = func;
snprintf(task->name, sizeof(task->name), "%s", name);
task->destroyed = false;

tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task);

@@ -132,11 +133,29 @@ int rxe_init_task(void *obj, struct rxe_task *task,

void rxe_cleanup_task(struct rxe_task *task)
{
unsigned long flags;
bool idle;

/*
* Mark the task, then wait for it to finish. It might be
* running in a non-tasklet (direct call) context.
*/
task->destroyed = true;

do {
spin_lock_irqsave(&task->state_lock, flags);
idle = (task->state == TASK_STATE_START);
spin_unlock_irqrestore(&task->state_lock, flags);
} while (!idle);

tasklet_kill(&task->tasklet);
}

void rxe_run_task(struct rxe_task *task, int sched)
{
if (task->destroyed)
return;

if (sched)
tasklet_schedule(&task->tasklet);
else

@@ -54,6 +54,7 @@ struct rxe_task {
int (*func)(void *arg);
int ret;
char name[16];
bool destroyed;
};

/*

@@ -566,7 +566,7 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
if (udata) {
if (udata->inlen) {
err = -EINVAL;
goto err1;
goto err2;
}
qp->is_user = 1;
}
@@ -575,12 +575,13 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,

err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
if (err)
goto err2;
goto err3;

return &qp->ibqp;

err2:
err3:
rxe_drop_index(qp);
err2:
rxe_drop_ref(qp);
err1:
return ERR_PTR(err);
@@ -1009,11 +1010,19 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct rxe_cq *cq = to_rcq(ibcq);
unsigned long irq_flags;
int ret = 0;

spin_lock_irqsave(&cq->cq_lock, irq_flags);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;

return 0;
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
ret = 1;

spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)

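The rxe_req_notify_cq() change above makes rxe honor IB_CQ_REPORT_MISSED_EVENTS by returning 1 when completions are already queued at re-arm time. For context, a minimal sketch of the standard consumer poll/re-arm idiom that relies on that return value; handle_wc() is a hypothetical placeholder, not part of this series:

    struct ib_wc wc;

    do {
        while (ib_poll_cq(cq, 1, &wc) > 0)
            handle_wc(&wc);   /* consumer-specific completion handling */
    } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);

A positive return tells the consumer to poll again instead of sleeping, closing the window where a completion arrives between the last poll and the re-arm.
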
@@ -575,8 +575,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;

if (ib_query_port(priv->ca, priv->port, &port_attr) ||
port_attr.state != IB_PORT_ACTIVE) {
if (ib_query_port(priv->ca, priv->port, &port_attr)) {
ipoib_dbg(priv, "ib_query_port() failed\n");
return;
}
if (port_attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
port_attr.state);
return;

@@ -890,11 +890,14 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_ESTABLISHED:
iser_connected_handler(cma_id, event->param.conn.private_data);
break;
case RDMA_CM_EVENT_REJECTED:
iser_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
/* FALLTHROUGH */
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
iser_connect_error(cma_id);
break;
case RDMA_CM_EVENT_DISCONNECTED:

@@ -789,6 +789,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
*/
return 1;
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
isert_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
@@ -1842,6 +1844,8 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->sense_buffer, pdu_len,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
return -ENOMEM;

isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
@@ -1969,6 +1973,8 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
return -ENOMEM;
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
@@ -2009,6 +2015,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)

isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
return -ENOMEM;

isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;

@@ -64,6 +64,11 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
@@ -384,6 +389,9 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
max_page_list_len);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
if (ret == -ENOMEM)
pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
dev_name(&device->dev));
goto destroy_pool;
}
d->mr = mr;
@@ -1266,8 +1274,12 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
struct ib_pool_fmr *fmr;
u64 io_addr = 0;

if (state->fmr.next >= state->fmr.end)
if (state->fmr.next >= state->fmr.end) {
shost_printk(KERN_ERR, ch->target->scsi_host,
PFX "Out of MRs (mr_per_cmd = %d)\n",
ch->target->mr_per_cmd);
return -ENOMEM;
}

WARN_ON_ONCE(!dev->use_fmr);

@@ -1323,8 +1335,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
u32 rkey;
int n, err;

if (state->fr.next >= state->fr.end)
if (state->fr.next >= state->fr.end) {
shost_printk(KERN_ERR, ch->target->scsi_host,
PFX "Out of MRs (mr_per_cmd = %d)\n",
ch->target->mr_per_cmd);
return -ENOMEM;
}

WARN_ON_ONCE(!dev->use_fast_reg);

@@ -1556,7 +1572,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
return 0;
}

#if defined(DYNAMIC_DATA_DEBUG)
static void srp_check_mapping(struct srp_map_state *state,
struct srp_rdma_ch *ch, struct srp_request *req,
struct scatterlist *scat, int count)
@@ -1580,7 +1595,6 @@ static void srp_check_mapping(struct srp_map_state *state,
scsi_bufflen(req->scmnd), desc_len, mr_len,
state->ndesc, state->nmdesc);
}
#endif

/**
* srp_map_data() - map SCSI data buffer onto an SRP request
@@ -1669,14 +1683,12 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
if (ret < 0)
goto unmap;

#if defined(DYNAMIC_DEBUG)
{
DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
"Memory mapping consistency check");
if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
if (DYNAMIC_DEBUG_BRANCH(ddm))
srp_check_mapping(&state, ch, req, scat, count);
}
#endif

/* We've mapped the request, now pull as much of the indirect
* descriptor table as we can into the command buffer. If this
@@ -3287,7 +3299,9 @@ static ssize_t srp_create_target(struct device *dev,
*/
scsi_host_get(target->scsi_host);

mutex_lock(&host->add_target_mutex);
ret = mutex_lock_interruptible(&host->add_target_mutex);
if (ret < 0)
goto put;

ret = srp_parse_options(buf, target);
if (ret)
@@ -3443,6 +3457,7 @@ static ssize_t srp_create_target(struct device *dev,
out:
mutex_unlock(&host->add_target_mutex);

put:
scsi_host_put(target->scsi_host);
if (ret < 0)
scsi_host_put(target->scsi_host);
@@ -3526,6 +3541,7 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
static void srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct ib_device_attr *attr = &device->attrs;
struct srp_host *host;
int mr_page_shift, p;
u64 max_pages_per_mr;
@@ -3540,25 +3556,25 @@ static void srp_add_one(struct ib_device *device)
* minimum of 4096 bytes. We're unlikely to build large sglists
* out of smaller entries.
*/
mr_page_shift = max(12, ffs(device->attrs.page_size_cap) - 1);
mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
srp_dev->mr_page_size = 1 << mr_page_shift;
srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
max_pages_per_mr = device->attrs.max_mr_size;
max_pages_per_mr = attr->max_mr_size;
do_div(max_pages_per_mr, srp_dev->mr_page_size);
pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
device->attrs.max_mr_size, srp_dev->mr_page_size,
attr->max_mr_size, srp_dev->mr_page_size,
max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);

srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
device->map_phys_fmr && device->unmap_fmr);
srp_dev->has_fr = (device->attrs.device_cap_flags &
srp_dev->has_fr = (attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
dev_warn(&device->dev, "neither FMR nor FR is supported\n");
} else if (!never_register &&
device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
srp_dev->use_fast_reg = (srp_dev->has_fr &&
(!srp_dev->has_fmr || prefer_fr));
srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
@@ -3571,13 +3587,13 @@ static void srp_add_one(struct ib_device *device)
if (srp_dev->use_fast_reg) {
srp_dev->max_pages_per_mr =
min_t(u32, srp_dev->max_pages_per_mr,
device->attrs.max_fast_reg_page_list_len);
attr->max_fast_reg_page_list_len);
}
srp_dev->mr_max_size = srp_dev->mr_page_size *
srp_dev->max_pages_per_mr;
pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
device->name, mr_page_shift, device->attrs.max_mr_size,
device->attrs.max_fast_reg_page_list_len,
device->name, mr_page_shift, attr->max_mr_size,
attr->max_fast_reg_page_list_len,
srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

INIT_LIST_HEAD(&srp_dev->dev_list);

@@ -1840,7 +1840,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
struct srpt_rdma_ch *ch, *tmp_ch;
u32 it_iu_len;
int i, ret = 0;
unsigned char *p;

WARN_ON_ONCE(irqs_disabled());

@@ -1994,21 +1993,18 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));

pr_debug("registering session %s\n", ch->sess_name);
p = &ch->sess_name[0];

try_again:
ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
TARGET_PROT_NORMAL, p, ch, NULL);
TARGET_PROT_NORMAL, ch->sess_name, ch,
NULL);
/* Retry without leading "0x" */
if (IS_ERR(ch->sess))
ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
TARGET_PROT_NORMAL,
ch->sess_name + 2, ch, NULL);
if (IS_ERR(ch->sess)) {
pr_info("Rejected login because no ACL has been"
" configured yet for initiator %s.\n", p);
/*
* XXX: Hack to retry of ch->i_port_id without leading '0x'
*/
if (p == &ch->sess_name[0]) {
p += 2;
goto try_again;
}
pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n",
ch->sess_name);
rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);

@@ -191,8 +191,8 @@ int qede_roce_register_driver(struct qedr_driver *drv)
}
mutex_unlock(&qedr_dev_list_lock);

DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n",
qedr_counter);
pr_notice("qedr: discovered and registered %d RoCE funcs\n",
qedr_counter);

return 0;
}

@@ -43,6 +43,28 @@

#define NVME_RDMA_MAX_INLINE_SEGMENTS 1

static const char *const nvme_rdma_cm_status_strs[] = {
[NVME_RDMA_CM_INVALID_LEN] = "invalid length",
[NVME_RDMA_CM_INVALID_RECFMT] = "invalid record format",
[NVME_RDMA_CM_INVALID_QID] = "invalid queue ID",
[NVME_RDMA_CM_INVALID_HSQSIZE] = "invalid host SQ size",
[NVME_RDMA_CM_INVALID_HRQSIZE] = "invalid host RQ size",
[NVME_RDMA_CM_NO_RSC] = "resource not found",
[NVME_RDMA_CM_INVALID_IRD] = "invalid IRD",
[NVME_RDMA_CM_INVALID_ORD] = "Invalid ORD",
};

static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
{
size_t index = status;

if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) &&
nvme_rdma_cm_status_strs[index])
return nvme_rdma_cm_status_strs[index];
else
return "unrecognized reason";
};

/*
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
@@ -1207,16 +1229,24 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
struct rdma_cm_event *ev)
{
if (ev->param.conn.private_data_len) {
struct nvme_rdma_cm_rej *rej =
(struct nvme_rdma_cm_rej *)ev->param.conn.private_data;
struct rdma_cm_id *cm_id = queue->cm_id;
int status = ev->status;
const char *rej_msg;
const struct nvme_rdma_cm_rej *rej_data;
u8 rej_data_len;

rej_msg = rdma_reject_msg(cm_id, status);
rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);

if (rej_data && rej_data_len >= sizeof(u16)) {
u16 sts = le16_to_cpu(rej_data->sts);

dev_err(queue->ctrl->ctrl.device,
"Connect rejected, status %d.", le16_to_cpu(rej->sts));
/* XXX: Think of something clever to do here... */
"Connect rejected: status %d (%s) nvme status %d (%s).\n",
status, rej_msg, sts, nvme_rdma_cm_msg(sts));
} else {
dev_err(queue->ctrl->ctrl.device,
"Connect rejected, no private data.\n");
"Connect rejected: status %d (%s).\n", status, rej_msg);
}

return -ECONNRESET;

@@ -1358,6 +1358,9 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
ret = nvmet_rdma_device_removal(cm_id, queue);
break;
case RDMA_CM_EVENT_REJECTED:
pr_debug("Connection rejected: %s\n",
rdma_reject_msg(cm_id, event->status));
/* FALLTHROUGH */
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
nvmet_rdma_queue_connect_fail(cm_id, queue);

@@ -603,4 +603,10 @@ struct ib_cm_sidr_rep_param {
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
struct ib_cm_sidr_rep_param *param);

/**
 * ibcm_reject_msg - return a pointer to a reject message string.
 * @reason: Value returned in the REJECT event status field.
 */
const char *__attribute_const__ ibcm_reject_msg(int reason);

#endif /* IB_CM_H */

@@ -46,7 +46,7 @@
#define IB_MGMT_BASE_VERSION 1
#define OPA_MGMT_BASE_VERSION 0x80

#define OPA_SMP_CLASS_VERSION 0x80
#define OPA_SM_CLASS_VERSION 0x80

/* Management classes */
#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01

@@ -253,4 +253,10 @@ int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt);
int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr,
int *qp_attr_mask);

/**
 * iwcm_reject_msg - return a pointer to a reject message string.
 * @reason: Value returned in the REJECT event status field.
 */
const char *__attribute_const__ iwcm_reject_msg(int reason);

#endif /* IW_CM_H */

@@ -44,8 +44,6 @@
#define OPA_MAX_SLS 32
#define OPA_MAX_SCS 32

#define OPA_SMI_CLASS_VERSION 0x80

#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)

struct opa_smp {

@@ -388,4 +388,29 @@ int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
 */
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr);

/**
 * rdma_reject_msg - return a pointer to a reject message string.
 * @id: Communication identifier that received the REJECT event.
 * @reason: Value returned in the REJECT event status field.
 */
const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
int reason);
/**
 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 * request.
 * @id: Communication identifier that received the REJECT event.
 * @reason: Value returned in the REJECT event status field.
 */
bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);

/**
 * rdma_consumer_reject_data - return the consumer reject private data and
 * length, if any.
 * @id: Communication identifier that received the REJECT event.
 * @ev: RDMA CM reject event.
 * @data_len: Pointer to the resulting length of the consumer data.
 */
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len);

#endif /* RDMA_CM_H */

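The three helpers declared above are meant to be called from a ULP's RDMA CM event handler, as the iser, isert, NVMe and RDS hunks in this merge do. A minimal sketch of such a handler, assuming a hypothetical callback my_cm_handler() registered via rdma_create_id():

    static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
    {
        if (ev->event == RDMA_CM_EVENT_REJECTED) {
            u8 len;
            const void *data = rdma_consumer_reject_data(id, ev, &len);

            pr_info("connection rejected: %s, consumer data %p (%u bytes)\n",
                rdma_reject_msg(id, ev->status), data, len);
            /* tear down or retry as appropriate */
        }
        return 0;
    }

rdma_is_consumer_reject() distinguishes a consumer-defined rejection (IB_CM_REJ_CONSUMER_DEFINED on IB/RoCE, -ECONNREFUSED on iWARP) from transport-level failures; rdma_consumer_reject_data() already performs that check internally and returns NULL with a zero length otherwise.
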
@@ -110,7 +110,7 @@ struct rdma_ucm_bind {
__u32 id;
__u16 addr_size;
__u16 reserved;
struct sockaddr_storage addr;
struct __kernel_sockaddr_storage addr;
};

struct rdma_ucm_resolve_ip {
@@ -126,8 +126,8 @@ struct rdma_ucm_resolve_addr {
__u16 src_size;
__u16 dst_size;
__u32 reserved;
struct sockaddr_storage src_addr;
struct sockaddr_storage dst_addr;
struct __kernel_sockaddr_storage src_addr;
struct __kernel_sockaddr_storage dst_addr;
};

struct rdma_ucm_resolve_route {
@@ -164,8 +164,8 @@ struct rdma_ucm_query_addr_resp {
__u16 pkey;
__u16 src_size;
__u16 dst_size;
struct sockaddr_storage src_addr;
struct sockaddr_storage dst_addr;
struct __kernel_sockaddr_storage src_addr;
struct __kernel_sockaddr_storage dst_addr;
};

struct rdma_ucm_query_path_resp {
@@ -257,7 +257,7 @@ struct rdma_ucm_join_mcast {
__u32 id;
__u16 addr_size;
__u16 join_flags;
struct sockaddr_storage addr;
struct __kernel_sockaddr_storage addr;
};

struct rdma_ucm_get_event {

@@ -100,11 +100,14 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
trans->cm_connect_complete(conn, event);
break;

case RDMA_CM_EVENT_REJECTED:
rdsdebug("Connection rejected: %s\n",
rdma_reject_msg(cm_id, event->status));
/* FALLTHROUGH */
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE:
if (conn)