Merge branches 'cxgb4', 'ipoib' and 'mlx4' into for-next

Roland Dreier 2012-12-19 23:03:43 -08:00
commit d72623b665
9 changed files with 134 additions and 45 deletions

View File

@@ -752,6 +752,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                 dev->trans_start = jiffies;
                 ++tx->tx_head;
 
+                skb_orphan(skb);
+                skb_dst_drop(skb);
+
                 if (++priv->tx_outstanding == ipoib_sendq_size) {
                         ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                   tx->qp->qp_num);

View File

@@ -615,8 +615,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                 address->last_send = priv->tx_head;
                 ++priv->tx_head;
-                skb_orphan(skb);
 
+                skb_orphan(skb);
+                skb_dst_drop(skb);
         }
 
         if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))

View File

@@ -1338,6 +1338,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 {
         struct mlx4_cmd_mailbox *mailbox;
         __be32 *outbox;
+        u32 dword_field;
         int err;
         u8 byte_field;
@@ -1372,10 +1373,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
         MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
         MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
 
+        MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
+        if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
+                param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+        } else {
+                MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
+                if (byte_field & 0x8)
+                        param->steering_mode = MLX4_STEERING_MODE_B0;
+                else
+                        param->steering_mode = MLX4_STEERING_MODE_A0;
+        }
         /* steering attributes */
-        if (dev->caps.steering_mode ==
-            MLX4_STEERING_MODE_DEVICE_MANAGED) {
+        if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
                 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
                 MLX4_GET(param->log_mc_entry_sz, outbox,
                          INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);

View File

@@ -172,6 +172,7 @@ struct mlx4_init_hca_param {
         u8  log_uar_sz;
         u8  uar_page_sz; /* log pg sz in 4k chunks */
         u8  fs_hash_enable_bits;
+        u8  steering_mode; /* for QUERY_HCA */
         u64 dev_cap_enabled;
 };

View File

@@ -85,15 +85,15 @@ static int probe_vf;
 module_param(probe_vf, int, 0644);
 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
 
-int mlx4_log_num_mgm_entry_size = 10;
+int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
 
 module_param_named(log_num_mgm_entry_size,
                         mlx4_log_num_mgm_entry_size, int, 0444);
 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
                                          " of qp per mcg, for example:"
-                                         " 10 gives 248.range: 9<="
+                                         " 10 gives 248.range: 7 <="
                                          " log_num_mgm_entry_size <= 12."
-                                         " Not in use with device managed"
-                                         " flow steering");
+                                         " To activate device managed"
+                                         " flow steering when available, set to -1");
 
 static bool enable_64b_cqe_eqe;
 module_param(enable_64b_cqe_eqe, bool, 0444);
@@ -281,28 +281,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         dev->caps.max_gso_sz = dev_cap->max_gso_sz;
         dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
 
-        if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
-                dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
-                dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
-                dev->caps.fs_log_max_ucast_qp_range_size =
-                        dev_cap->fs_log_max_ucast_qp_range_size;
-        } else {
-                if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
-                    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
-                        dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
-                } else {
-                        dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
-
-                        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
-                            dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
-                                mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
-                                          "set to use B0 steering. Falling back to A0 steering mode.\n");
-                }
-                dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
-        }
-
-        mlx4_dbg(dev, "Steering mode is: %s\n",
-                 mlx4_steering_mode_str(dev->caps.steering_mode));
-
         /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
         if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
                 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -493,6 +471,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
 }
 EXPORT_SYMBOL(mlx4_is_slave_active);
 
+static void slave_adjust_steering_mode(struct mlx4_dev *dev,
+                                       struct mlx4_dev_cap *dev_cap,
+                                       struct mlx4_init_hca_param *hca_param)
+{
+        dev->caps.steering_mode = hca_param->steering_mode;
+        if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+                dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+                dev->caps.fs_log_max_ucast_qp_range_size =
+                        dev_cap->fs_log_max_ucast_qp_range_size;
+        } else
+                dev->caps.num_qp_per_mgm =
+                        4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
+
+        mlx4_dbg(dev, "Steering mode is: %s\n",
+                 mlx4_steering_mode_str(dev->caps.steering_mode));
+}
+
 static int mlx4_slave_cap(struct mlx4_dev *dev)
 {
         int err;
@@ -635,6 +630,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                 dev->caps.cqe_size = 32;
         }
 
+        slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
+
         return 0;
 
 err_mem:
@@ -1321,6 +1318,59 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
         }
 }
 
+static int choose_log_fs_mgm_entry_size(int qp_per_entry)
+{
+        int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
+
+        for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
+              i++) {
+                if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
+                        break;
+        }
+
+        return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
+}
+
+static void choose_steering_mode(struct mlx4_dev *dev,
+                                 struct mlx4_dev_cap *dev_cap)
+{
+        if (mlx4_log_num_mgm_entry_size == -1 &&
+            dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
+            (!mlx4_is_mfunc(dev) ||
+             (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
+            choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
+                MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
+                dev->oper_log_mgm_entry_size =
+                        choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
+                dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+                dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+                dev->caps.fs_log_max_ucast_qp_range_size =
+                        dev_cap->fs_log_max_ucast_qp_range_size;
+        } else {
+                if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+                    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+                        dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+                else {
+                        dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+
+                        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+                            dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+                                mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
+                                          "set to use B0 steering. Falling back to A0 steering mode.\n");
+                }
+                dev->oper_log_mgm_entry_size =
+                        mlx4_log_num_mgm_entry_size > 0 ?
+                        mlx4_log_num_mgm_entry_size :
+                        MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
+                dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
+        }
+
+        mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
+                 "modparam log_num_mgm_entry_size = %d\n",
+                 mlx4_steering_mode_str(dev->caps.steering_mode),
+                 dev->oper_log_mgm_entry_size,
+                 mlx4_log_num_mgm_entry_size);
+}
+
 static int mlx4_init_hca(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1360,6 +1410,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                         goto err_stop_fw;
                 }
 
+                choose_steering_mode(dev, &dev_cap);
+
                 if (mlx4_is_master(dev))
                         mlx4_parav_master_pf_caps(dev);
@@ -2452,6 +2504,17 @@ static int __init mlx4_verify_params(void)
                 port_type_array[0] = true;
         }
 
+        if (mlx4_log_num_mgm_entry_size != -1 &&
+            (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
+             mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
+                pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
+                           "in legal range (-1 or %d..%d)\n",
+                           mlx4_log_num_mgm_entry_size,
+                           MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+                           MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+                return -1;
+        }
+
         return 0;
 }

View File

@@ -54,12 +54,7 @@ struct mlx4_mgm {
 
 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
 {
-        if (dev->caps.steering_mode ==
-            MLX4_STEERING_MODE_DEVICE_MANAGED)
-                return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
-        else
-                return min((1 << mlx4_log_num_mgm_entry_size),
-                           MLX4_MAX_MGM_ENTRY_SIZE);
+        return 1 << dev->oper_log_mgm_entry_size;
 }
 
 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)

View File

@@ -94,8 +94,10 @@ enum {
 };
 
 enum {
-        MLX4_MAX_MGM_ENTRY_SIZE         = 0x1000,
-        MLX4_MAX_QP_PER_MGM             = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+        MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
+        MLX4_MIN_MGM_LOG_ENTRY_SIZE     = 7,
+        MLX4_MAX_MGM_LOG_ENTRY_SIZE     = 12,
+        MLX4_MAX_QP_PER_MGM             = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
         MLX4_MTT_ENTRY_PER_SEG          = 8,
 };

View File

@@ -3071,6 +3071,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
         int err;
+        int qpn;
         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
         struct _rule_hw *rule_header;
         int header_id;
@@ -3080,13 +3081,21 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                 return -EOPNOTSUPP;
 
         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
+        err = get_res(dev, slave, qpn, RES_QP, NULL);
+        if (err) {
+                pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+                return err;
+        }
         rule_header = (struct _rule_hw *)(ctrl + 1);
         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
 
         switch (header_id) {
         case MLX4_NET_TRANS_RULE_ID_ETH:
-                if (validate_eth_header_mac(slave, rule_header, rlist))
-                        return -EINVAL;
+                if (validate_eth_header_mac(slave, rule_header, rlist)) {
+                        err = -EINVAL;
+                        goto err_put;
+                }
                 break;
         case MLX4_NET_TRANS_RULE_ID_IB:
                 break;
@@ -3094,14 +3103,17 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
         case MLX4_NET_TRANS_RULE_ID_TCP:
         case MLX4_NET_TRANS_RULE_ID_UDP:
                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
-                if (add_eth_header(dev, slave, inbox, rlist, header_id))
-                        return -EINVAL;
+                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
+                        err = -EINVAL;
+                        goto err_put;
+                }
                 vhcr->in_modifier +=
                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
                 break;
         default:
                 pr_err("Corrupted mailbox.\n");
-                return -EINVAL;
+                err = -EINVAL;
+                goto err_put;
         }
 
         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
@@ -3109,16 +3121,18 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
                            MLX4_CMD_NATIVE);
         if (err)
-                return err;
+                goto err_put;
 
         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
         if (err) {
                 mlx4_err(dev, "Fail to add flow steering resources.\n ");
                 /* detach rule*/
                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
-                         MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                          MLX4_CMD_NATIVE);
         }
+err_put:
+        put_res(dev, slave, qpn, RES_QP);
         return err;
 }

View File

@@ -625,6 +625,7 @@ struct mlx4_dev {
         u8              rev_id;
         char            board_id[MLX4_BOARD_ID_LEN];
         int             num_vfs;
+        int             oper_log_mgm_entry_size;
         u64             regid_promisc_array[MLX4_MAX_PORTS + 1];
         u64             regid_allmulti_array[MLX4_MAX_PORTS + 1];
 };
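
For reference (not part of the commit itself): a standalone sketch of the MGM entry-size arithmetic that the new MLX4_*_MGM_LOG_ENTRY_SIZE constants and choose_log_fs_mgm_entry_size() in the hunks above rely on. The formula 4 * ((1 << log_size) / 16 - 2) is taken directly from the patch; the helper names below and the reading of it as "16-byte chunks minus a 2-chunk header, 4 QPs per chunk" are assumptions for illustration only.

#include <stdio.h>

/* Same arithmetic as the patch: number of QPs that fit in one MGM entry of
 * (1 << log_entry_size) bytes.  For log_entry_size = 10 this gives 248,
 * matching "10 gives 248" in the log_num_mgm_entry_size parameter description. */
static int qp_per_mgm(int log_entry_size)
{
        return 4 * ((1 << log_entry_size) / 16 - 2);
}

/* Mirrors choose_log_fs_mgm_entry_size(): the smallest log size in [7, 12]
 * whose entry can hold qp_per_entry QPs, or -1 if none can.
 * (Names here are illustrative, not the kernel's.) */
static int choose_log_size(int qp_per_entry)
{
        int i;

        for (i = 7; i <= 12; i++)
                if (qp_per_entry <= qp_per_mgm(i))
                        return i;
        return -1;
}

int main(void)
{
        int i;

        for (i = 7; i <= 12; i++)
                printf("log_num_mgm_entry_size = %2d -> %4d QPs per MGM entry\n",
                       i, qp_per_mgm(i));
        printf("choose_log_size(65) = %d\n", choose_log_size(65)); /* prints 9 */
        return 0;
}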
}; };