Merge branch 'mlx4-HA-LAG-SRIOV-VF'

Or Gerlitz says:

====================
Add HA and LAG support for mlx4 SRIOV VFs

This series is built upon the code added in commit ce388ff "Merge branch
'mlx4-next'" which added HA and LAG support to mlx4 RoCE and SRIOV services.

We add HA and Link Aggregation support to single-ported mlx4 Ethernet VFs.

In this case, the PF Ethernet interfaces are bonded while the VFs still see
single-port HW devices (already supported) -- however, that port is now highly
available. This means that all VF HW QPs (both VF Ethernet driver QPs and VF
RoCE / RAW QPs) are subject to the V2P (Virtual-To-Physical) port mapping
managed by the PF driver, and are hence resilient to link failures and
similar events.

When bonding operates in Dynamic link aggregation (802.3ad) mode, traffic
from each VF goes over the VF's "base port" (the port the admin assigned the
VF to) as long as that port is up. When the port fails, traffic from all VFs
defined on it moves to the other port, and moves back to the base port when
it recovers (see the sketch below).

Moni and Or.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0fe3e20416
David S. Miller <davem@davemloft.net>, 2015-12-06 22:40:46 -05:00
9 changed files with 914 additions and 49 deletions, under:
drivers/infiniband/hw/mlx4
drivers/net/ethernet/mellanox/mlx4
include/linux/mlx4
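
To make the failover behavior described above concrete, here is a minimal
userspace sketch of the V2P idea. Every name below is illustrative, not an
mlx4 driver symbol: a VF's traffic stays on its admin-assigned base port
while that port has link, fails over to the surviving port otherwise, and
returns when the base port recovers.

    /* Minimal userspace model of the V2P (virtual-to-physical) mapping. */
    #include <stdbool.h>
    #include <stdio.h>

    static int v2p_resolve(const bool link_up[2], int base_port)
    {
        int other = (base_port == 1) ? 2 : 1;

        if (link_up[base_port - 1])
            return base_port;     /* normal case: stay on the base port */
        if (link_up[other - 1])
            return other;         /* failover: remap to the surviving port */
        return base_port;         /* both down: keep the base mapping */
    }

    int main(void)
    {
        bool link_up[2] = { false, true };   /* port 1 down, port 2 up */

        printf("VF based on port 1 -> physical port %d\n",
               v2p_resolve(link_up, 1));     /* prints 2 (failed over) */
        return 0;
    }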


@@ -40,6 +40,7 @@
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"
enum {
@@ -606,8 +607,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
struct ib_mad *mad)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int err, other_port;
int slave = -1;
u8 *slave_id;
int is_eth = 0;
@@ -625,7 +626,17 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
return -EINVAL;
}
err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
if (err && mlx4_is_mf_bonded(dev->dev)) {
other_port = (port == 1) ? 2 : 1;
err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
if (!err) {
port = other_port;
pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
slave, grh->dgid.raw, port, other_port);
}
}
if (err) {
mlx4_ib_warn(ibdev, "failed matching grh\n"); mlx4_ib_warn(ibdev, "failed matching grh\n");
return -ENOENT; return -ENOENT;
} }


@@ -151,6 +151,17 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
eqe = next_slave_event_eqe(slave_eq)) {
slave = eqe->slave_id;
if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE &&
eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN &&
mlx4_is_bonded(dev)) {
struct mlx4_port_cap port_cap;
if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state)
goto consume;
if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state)
goto consume;
}
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
for (i = 0; i <= dev->persist->num_vfs; i++) {
@@ -174,6 +185,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
mlx4_warn(dev, "Failed to generate event for slave %d\n",
slave);
}
consume:
++slave_eq->cons;
}
}
@@ -594,7 +606,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
for (i = 0; i < dev->persist->num_vfs + 1;
i++) {
int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);
if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
continue;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
if (i == mlx4_master_func_num(dev))
@@ -606,7 +620,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eqe->event.port_change.port =
cpu_to_be32(
(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
| (reported_port << 28));
mlx4_slave_event(dev, i, eqe);
}
} else { /* IB port */
@@ -636,7 +650,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
for (i = 0;
i < dev->persist->num_vfs + 1;
i++) {
int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);
if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
continue;
if (i == mlx4_master_func_num(dev))
continue;
@@ -645,7 +661,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eqe->event.port_change.port =
cpu_to_be32(
(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
| (reported_port << 28));
mlx4_slave_event(dev, i, eqe);
}
}
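
The mlx4_gen_slave_eqe() change above filters link events rather than
forwarding them blindly: while the device is bonded, a port-down event is
consumed (never delivered to VFs) as long as either physical port still has
link, so each VF keeps seeing its single port as up. A hedged userspace
model of that decision, with illustrative names only:

    #include <stdbool.h>

    /* Mirrors the "goto consume" logic above: swallow port-down events
     * while bonded and at least one physical port still has link. */
    static bool deliver_port_down_event(bool bonded, bool port1_up, bool port2_up)
    {
        if (bonded && (port1_up || port2_up))
            return false;   /* HA hides the failure from the VFs */
        return true;        /* unbonded, or both ports dead: report it */
    }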


@@ -1104,6 +1104,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c
goto out;
MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
port_cap->link_state = (field & 0x80) >> 7;
port_cap->supported_port_types = field & 3;
port_cap->suggested_type = (field >> 3) & 1;
port_cap->default_sense = (field >> 4) & 1;
@@ -1310,6 +1311,15 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
port_type |= MLX4_PORT_LINK_UP_MASK;
else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
port_type &= ~MLX4_PORT_LINK_UP_MASK;
else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) {
int other_port = (port == 1) ? 2 : 1;
struct mlx4_port_cap port_cap;
err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
if (err)
goto out;
port_type |= (port_cap.link_state << 7);
}
MLX4_PUT(outbox->buf, port_type,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
@@ -1325,7 +1335,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, short_field,
QUERY_PORT_CUR_MAX_PKEY_OFFSET);
}
out:
return err;
}
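
The QUERY_PORT changes pack the (possibly remote) port's link state into
bit 7 of the byte that also carries the supported port types, which is what
the (field & 0x80) >> 7 extraction and the port_type |= (port_cap.link_state << 7)
write above manipulate. A small sketch of that bit layout, assuming nothing
beyond what the hunk itself shows:

    #include <stdint.h>

    #define PORT_LINK_UP_BIT 0x80u

    static uint8_t fold_in_link_state(uint8_t port_type, uint8_t link_state)
    {
        /* mirrors: port_type |= (port_cap.link_state << 7) */
        return (uint8_t)(port_type | (link_state << 7));
    }

    static uint8_t extract_link_state(uint8_t field)
    {
        /* mirrors: port_cap->link_state = (field & 0x80) >> 7 */
        return (field & PORT_LINK_UP_BIT) >> 7;
    }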


@@ -44,6 +44,7 @@ struct mlx4_mod_stat_cfg {
};
struct mlx4_port_cap {
u8 link_state;
u8 supported_port_types;
u8 suggested_type;
u8 default_sense;


@@ -1221,6 +1221,76 @@ static ssize_t set_port_ib_mtu(struct device *dev,
return err ? err : count;
}
/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
int err = 0;
struct mlx4_slaves_pport slaves_port1;
struct mlx4_slaves_pport slaves_port2;
DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
bitmap_and(slaves_port_1_2,
slaves_port1.slaves, slaves_port2.slaves,
dev->persist->num_vfs + 1);
/* only single port vfs are allowed */
if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
return -EINVAL;
}
/* limit on maximum allowed VFs */
if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) >
MAX_MF_BOND_ALLOWED_SLAVES)
return -EINVAL;
if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
return -EINVAL;
}
err = mlx4_bond_mac_table(dev);
if (err)
return err;
err = mlx4_bond_vlan_table(dev);
if (err)
goto err1;
err = mlx4_bond_fs_rules(dev);
if (err)
goto err2;
return 0;
err2:
(void)mlx4_unbond_vlan_table(dev);
err1:
(void)mlx4_unbond_mac_table(dev);
return err;
}
static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
int ret, ret1;
ret = mlx4_unbond_fs_rules(dev);
if (ret)
mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret);
ret1 = mlx4_unbond_mac_table(dev);
if (ret1) {
mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
ret = ret1;
}
ret1 = mlx4_unbond_vlan_table(dev);
if (ret1) {
mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
ret = ret1;
}
return ret;
}
int mlx4_bond(struct mlx4_dev *dev)
{
int ret = 0;
@@ -1228,16 +1298,23 @@ int mlx4_bond(struct mlx4_dev *dev)
mutex_lock(&priv->bond_mutex);
if (!mlx4_is_bonded(dev)) {
ret = mlx4_do_bond(dev, true);
if (ret)
mlx4_err(dev, "Failed to bond device: %d\n", ret);
if (!ret && mlx4_is_master(dev)) {
ret = mlx4_mf_bond(dev);
if (ret) {
mlx4_err(dev, "bond for multifunction failed\n");
mlx4_do_bond(dev, false);
}
}
}
mutex_unlock(&priv->bond_mutex);
if (!ret)
mlx4_dbg(dev, "Device is bonded\n");
return ret;
}
EXPORT_SYMBOL_GPL(mlx4_bond);
@@ -1249,14 +1326,24 @@ int mlx4_unbond(struct mlx4_dev *dev)
mutex_lock(&priv->bond_mutex);
if (mlx4_is_bonded(dev)) {
int ret2 = 0;
ret = mlx4_do_bond(dev, false);
if (ret)
mlx4_err(dev, "Failed to unbond device: %d\n", ret);
if (mlx4_is_master(dev))
ret2 = mlx4_mf_unbond(dev);
if (ret2) {
mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
ret = ret2;
}
}
mutex_unlock(&priv->bond_mutex);
if (!ret)
mlx4_dbg(dev, "Device is unbonded\n");
return ret;
}
EXPORT_SYMBOL_GPL(mlx4_unbond);
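
mlx4_mf_bond() above acquires the three mirrored resources in a fixed order
(MAC table, then VLAN table, then flow-steering rules) and unwinds in reverse
on failure; mlx4_mf_unbond() tears all three down and reports the last error.
A generic sketch of that acquire/rollback shape, with hypothetical stand-in
functions rather than the driver's symbols:

    #include <stdio.h>

    static int bond_mac_tables(void)    { return 0; }   /* stand-in steps */
    static int bond_vlan_tables(void)   { return 0; }
    static int bond_fs_rules(void)      { return -1; }  /* fails here, for demo */
    static void unbond_vlan_tables(void){ puts("undo vlan"); }
    static void unbond_mac_tables(void) { puts("undo mac"); }

    static int mf_bond_sketch(void)
    {
        int err;

        err = bond_mac_tables();
        if (err)
            return err;
        err = bond_vlan_tables();
        if (err)
            goto err_mac;
        err = bond_fs_rules();
        if (err)
            goto err_vlan;
        return 0;

    err_vlan:                    /* unwind in reverse acquisition order */
        unbond_vlan_tables();
    err_mac:
        unbond_mac_tables();
        return err;
    }

    int main(void) { return mf_bond_sketch() ? 1 : 0; }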


@@ -736,6 +736,7 @@ struct mlx4_catas_err {
struct mlx4_mac_table {
__be64 entries[MLX4_MAX_MAC_NUM];
int refs[MLX4_MAX_MAC_NUM];
bool is_dup[MLX4_MAX_MAC_NUM];
struct mutex mutex;
int total;
int max;
@@ -758,6 +759,7 @@ struct mlx4_roce_gid_table {
struct mlx4_vlan_table {
__be32 entries[MLX4_MAX_VLAN_NUM];
int refs[MLX4_MAX_VLAN_NUM];
int is_dup[MLX4_MAX_VLAN_NUM];
struct mutex mutex;
int total;
int max;
@@ -1225,6 +1227,10 @@ void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
struct mlx4_roce_gid_table *table);
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
int mlx4_bond_vlan_table(struct mlx4_dev *dev);
int mlx4_unbond_vlan_table(struct mlx4_dev *dev);
int mlx4_bond_mac_table(struct mlx4_dev *dev);
int mlx4_unbond_mac_table(struct mlx4_dev *dev);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
/* resource tracker functions*/
@@ -1385,6 +1391,8 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
int mlx4_config_mad_demux(struct mlx4_dev *dev);
int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
int mlx4_bond_fs_rules(struct mlx4_dev *dev);
int mlx4_unbond_fs_rules(struct mlx4_dev *dev);
enum mlx4_zone_flags {
MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0,


@@ -61,6 +61,7 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
table->entries[i] = 0;
table->refs[i] = 0;
table->is_dup[i] = false;
}
table->max = 1 << dev->caps.log_num_macs;
table->total = 0;
@@ -74,6 +75,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
table->entries[i] = 0;
table->refs[i] = 0;
table->is_dup[i] = false;
}
table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
table->total = 0;
@@ -159,21 +161,94 @@ int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
{
int i, num_eth_ports = 0;
if (!mlx4_is_mfunc(dev))
return false;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
++num_eth_ports;
return (num_eth_ports == 2) ? true : false;
}
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
int i, err = 0;
int free = -1;
int free_for_dup = -1;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
bool need_mf_bond = mlx4_need_mf_bond(dev);
bool can_mf_bond = true;
mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n", mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
(unsigned long long) mac, port); (unsigned long long)mac, port,
dup ? "with" : "without");
if (need_mf_bond) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock(&dup_table->mutex);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock(&table->mutex);
}
} else {
mutex_lock(&table->mutex);
}
if (need_mf_bond) {
int index_at_port = -1;
int index_at_dup_port = -1;
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
index_at_port = i;
if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
index_at_dup_port = i;
}
/* check that same mac is not in the tables at different indices */
if ((index_at_port != index_at_dup_port) &&
(index_at_port >= 0) &&
(index_at_dup_port >= 0))
can_mf_bond = false;
/* If the mac is already in the primary table, the slot must be
* available in the duplicate table as well.
*/
if (index_at_port >= 0 && index_at_dup_port < 0 &&
dup_table->refs[index_at_port]) {
can_mf_bond = false;
}
/* If the mac is already in the duplicate table, check that the
* corresponding index is not occupied in the primary table, or
* the primary table already contains the mac at the same index.
* Otherwise, you cannot bond (primary contains a different mac
* at that index).
*/
if (index_at_dup_port >= 0) {
if (!table->refs[index_at_dup_port] ||
((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
free_for_dup = index_at_dup_port;
else
can_mf_bond = false;
}
}
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (!table->refs[i]) {
if (free < 0)
free = i;
if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
if (!dup_table->refs[i])
free_for_dup = i;
}
continue;
}
@@ -182,10 +257,30 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
/* MAC already registered, increment ref count */
err = i;
++table->refs[i];
if (dup) {
u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);
if (dup_mac != mac || !dup_table->is_dup[i]) {
mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n",
mac, dup_port, i);
}
}
goto out;
}
}
if (need_mf_bond && (free_for_dup < 0)) {
if (dup) {
mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
dup = false;
}
can_mf_bond = false;
}
if (need_mf_bond && can_mf_bond)
free = free_for_dup;
mlx4_dbg(dev, "Free MAC index is %d\n", free); mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) { if (table->total == table->max) {
@ -205,10 +300,35 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
goto out; goto out;
} }
table->refs[free] = 1; table->refs[free] = 1;
err = free; table->is_dup[free] = false;
++table->total; ++table->total;
if (dup) {
dup_table->refs[free] = 0;
dup_table->is_dup[free] = true;
dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
if (unlikely(err)) {
mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
dup_table->is_dup[free] = false;
dup_table->entries[free] = 0;
goto out;
}
++dup_table->total;
}
err = free;
out:
if (need_mf_bond) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);
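
Note the locking pattern repeated in __mlx4_register_mac() and the other
table operations above: whichever port is being modified, the port 1 table
mutex is always taken before the port 2 table mutex (and released in the
opposite order), so two threads working on opposite ports cannot deadlock.
A generic sketch of that fixed-order discipline, using plain pthreads rather
than kernel mutexes:

    #include <pthread.h>

    static pthread_mutex_t port_table_lock[2] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    static void lock_both_tables(void)
    {
        pthread_mutex_lock(&port_table_lock[0]);   /* port 1 first, always */
        pthread_mutex_lock(&port_table_lock[1]);
    }

    static void unlock_both_tables(void)
    {
        pthread_mutex_unlock(&port_table_lock[1]); /* release in reverse */
        pthread_mutex_unlock(&port_table_lock[0]);
    }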
@@ -255,6 +375,9 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
struct mlx4_port_info *info;
struct mlx4_mac_table *table;
int index;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
if (port < 1 || port > dev->caps.num_ports) {
mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
@@ -262,22 +385,59 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
info = &mlx4_priv(dev)->port[port];
table = &info->mac_table;
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock(&dup_table->mutex);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock(&table->mutex);
}
} else {
mutex_lock(&table->mutex);
}
index = find_index(dev, table, mac);
if (validate_index(dev, table, index))
goto out;
if (--table->refs[index] || table->is_dup[index]) {
mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
index);
if (!table->refs[index])
dup_table->is_dup[index] = false;
goto out;
}
table->entries[index] = 0;
if (mlx4_set_port_mac_table(dev, port, table->entries))
mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
--table->total;
if (dup) {
dup_table->is_dup[index] = false;
if (dup_table->refs[index])
goto out;
dup_table->entries[index] = 0;
if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);
--table->total;
}
out:
if (dup) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
@@ -311,9 +471,22 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
struct mlx4_mac_table *table = &info->mac_table;
int index = qpn - info->base_qpn;
int err = 0;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
/* CX1 doesn't support multi-functions */
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock(&dup_table->mutex);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock(&table->mutex);
}
} else {
mutex_lock(&table->mutex);
}
err = validate_index(dev, table, index);
if (err)
@@ -326,9 +499,30 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
(unsigned long long) new_mac);
table->entries[index] = 0;
} else {
if (dup) {
dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
if (unlikely(err)) {
mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
(unsigned long long)new_mac);
dup_table->entries[index] = 0;
}
}
}
out:
if (dup) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
@@ -380,8 +574,28 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int i, err = 0;
int free = -1;
int free_for_dup = -1;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
bool need_mf_bond = mlx4_need_mf_bond(dev);
bool can_mf_bond = true;
mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
vlan, port,
dup ? "with" : "without");
if (need_mf_bond) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock(&dup_table->mutex);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock(&table->mutex);
}
} else {
mutex_lock(&table->mutex);
}
if (table->total == table->max) {
/* No free vlan entries */
@@ -389,22 +603,85 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
goto out;
}
if (need_mf_bond) {
int index_at_port = -1;
int index_at_dup_port = -1;
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))))
index_at_port = i;
if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))))
index_at_dup_port = i;
}
/* check that same vlan is not in the tables at different indices */
if ((index_at_port != index_at_dup_port) &&
(index_at_port >= 0) &&
(index_at_dup_port >= 0))
can_mf_bond = false;
/* If the vlan is already in the primary table, the slot must be
* available in the duplicate table as well.
*/
if (index_at_port >= 0 && index_at_dup_port < 0 &&
dup_table->refs[index_at_port]) {
can_mf_bond = false;
}
/* If the vlan is already in the duplicate table, check that the
* corresponding index is not occupied in the primary table, or
* the primary table already contains the vlan at the same index.
* Otherwise, you cannot bond (primary contains a different vlan
* at that index).
*/
if (index_at_dup_port >= 0) {
if (!table->refs[index_at_dup_port] ||
(vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
free_for_dup = index_at_dup_port;
else
can_mf_bond = false;
}
}
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
if (!table->refs[i]) {
if (free < 0)
free = i;
if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
if (!dup_table->refs[i])
free_for_dup = i;
}
}
if ((table->refs[i] || table->is_dup[i]) &&
(vlan == (MLX4_VLAN_MASK &
be32_to_cpu(table->entries[i])))) {
/* Vlan already registered, increase references count */
mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
*index = i;
++table->refs[i];
if (dup) {
u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);
if (dup_vlan != vlan || !dup_table->is_dup[i]) {
mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
vlan, dup_port, i);
}
}
goto out;
}
}
if (need_mf_bond && (free_for_dup < 0)) {
if (dup) {
mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
dup = false;
}
can_mf_bond = false;
}
if (need_mf_bond && can_mf_bond)
free = free_for_dup;
if (free < 0) {
err = -ENOMEM;
goto out;
@@ -412,6 +689,7 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
/* Register new VLAN */
table->refs[free] = 1;
table->is_dup[free] = false;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
err = mlx4_set_port_vlan_table(dev, port, table->entries);
@@ -421,11 +699,35 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
table->entries[free] = 0;
goto out;
}
++table->total;
if (dup) {
dup_table->refs[free] = 0;
dup_table->is_dup[free] = true;
dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
if (unlikely(err)) {
mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
dup_table->is_dup[free] = false;
dup_table->entries[free] = 0;
goto out;
}
++dup_table->total;
}
*index = free;
out:
if (need_mf_bond) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
return err;
}
@@ -455,8 +757,22 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int index;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock(&dup_table->mutex);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock(&table->mutex);
}
} else {
mutex_lock(&table->mutex);
}
if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
goto out;
@@ -467,16 +783,38 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
goto out;
}
if (--table->refs[index] || table->is_dup[index]) {
mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
table->refs[index], index);
if (!table->refs[index])
dup_table->is_dup[index] = false;
goto out;
}
table->entries[index] = 0;
if (mlx4_set_port_vlan_table(dev, port, table->entries))
mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
--table->total;
if (dup) {
dup_table->is_dup[index] = false;
if (dup_table->refs[index])
goto out;
dup_table->entries[index] = 0;
if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
--dup_table->total;
}
out:
if (dup) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
}
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
@@ -495,6 +833,220 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
int mlx4_bond_mac_table(struct mlx4_dev *dev)
{
struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
int ret = 0;
int i;
bool update1 = false;
bool update2 = false;
mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if ((t1->entries[i] != t2->entries[i]) &&
t1->entries[i] && t2->entries[i]) {
mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
ret = -EINVAL;
goto unlock;
}
}
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (t1->entries[i] && !t2->entries[i]) {
t2->entries[i] = t1->entries[i];
t2->is_dup[i] = true;
update2 = true;
} else if (!t1->entries[i] && t2->entries[i]) {
t1->entries[i] = t2->entries[i];
t1->is_dup[i] = true;
update1 = true;
} else if (t1->entries[i] && t2->entries[i]) {
t1->is_dup[i] = true;
t2->is_dup[i] = true;
}
}
if (update1) {
ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
if (ret)
mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
}
if (!ret && update2) {
ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
if (ret)
mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
}
if (ret)
mlx4_warn(dev, "failed to create mirror MAC tables\n");
unlock:
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex);
return ret;
}
int mlx4_unbond_mac_table(struct mlx4_dev *dev)
{
struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
int ret = 0;
int ret1;
int i;
bool update1 = false;
bool update2 = false;
mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (t1->entries[i] != t2->entries[i]) {
mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
ret = -EINVAL;
goto unlock;
}
}
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (!t1->entries[i])
continue;
t1->is_dup[i] = false;
if (!t1->refs[i]) {
t1->entries[i] = 0;
update1 = true;
}
t2->is_dup[i] = false;
if (!t2->refs[i]) {
t2->entries[i] = 0;
update2 = true;
}
}
if (update1) {
ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
if (ret)
mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
}
if (update2) {
ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
if (ret1) {
mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
ret = ret1;
}
}
unlock:
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex);
return ret;
}
int mlx4_bond_vlan_table(struct mlx4_dev *dev)
{
struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
int ret = 0;
int i;
bool update1 = false;
bool update2 = false;
mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if ((t1->entries[i] != t2->entries[i]) &&
t1->entries[i] && t2->entries[i]) {
mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
ret = -EINVAL;
goto unlock;
}
}
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if (t1->entries[i] && !t2->entries[i]) {
t2->entries[i] = t1->entries[i];
t2->is_dup[i] = true;
update2 = true;
} else if (!t1->entries[i] && t2->entries[i]) {
t1->entries[i] = t2->entries[i];
t1->is_dup[i] = true;
update1 = true;
} else if (t1->entries[i] && t2->entries[i]) {
t1->is_dup[i] = true;
t2->is_dup[i] = true;
}
}
if (update1) {
ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
if (ret)
mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
}
if (!ret && update2) {
ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
if (ret)
mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
}
if (ret)
mlx4_warn(dev, "failed to create mirror VLAN tables\n");
unlock:
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex);
return ret;
}
int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
{
struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
int ret = 0;
int ret1;
int i;
bool update1 = false;
bool update2 = false;
mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if (t1->entries[i] != t2->entries[i]) {
mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
ret = -EINVAL;
goto unlock;
}
}
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if (!t1->entries[i])
continue;
t1->is_dup[i] = false;
if (!t1->refs[i]) {
t1->entries[i] = 0;
update1 = true;
}
t2->is_dup[i] = false;
if (!t2->refs[i]) {
t2->entries[i] = 0;
update2 = true;
}
}
if (update1) {
ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
if (ret)
mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
}
if (update2) {
ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
if (ret1) {
mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
ret = ret1;
}
}
unlock:
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex);
return ret;
}
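
Throughout the table code above, an entry is kept alive by two independent
marks: a local reference count (refs) and the is_dup flag that says the entry
only mirrors the other port. The hardware slot may be cleared only when both
are gone, which is what tests like "if (--table->refs[index] || table->is_dup[index])"
implement. A tiny model of that bookkeeping, with illustrative names only:

    #include <stdbool.h>
    #include <assert.h>

    struct mirrored_entry {
        int  refs;    /* local users of this entry on this port */
        bool is_dup;  /* entry exists only to mirror the other port */
    };

    /* Drop one local reference; report whether the hardware slot may be
     * cleared. Mirrors the driver's unregister test quoted above. */
    static bool unref_can_clear(struct mirrored_entry *e)
    {
        assert(e->refs > 0);
        return --e->refs == 0 && !e->is_dup;
    }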
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
struct mlx4_cmd_mailbox *inmailbox, *outmailbox;


@@ -222,6 +222,13 @@ enum res_fs_rule_states {
struct res_fs_rule {
struct res_common com;
int qpn;
/* VF DMFS mbox with port flipped */
void *mirr_mbox;
/* > 0 --> apply mirror when getting into HA mode */
/* = 0 --> un-apply mirror when getting out of HA mode */
u32 mirr_mbox_size;
struct list_head mirr_list;
u64 mirr_rule_id;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -4284,6 +4291,22 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
static u32 qp_attach_mbox_size(void *mbox)
{
u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
struct _rule_hw *rule_header;
rule_header = (struct _rule_hw *)(mbox + size);
while (rule_header->size) {
size += rule_header->size * sizeof(u32);
rule_header += 1;
}
return size;
}
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -4300,6 +4323,8 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct _rule_hw *rule_header;
int header_id;
struct res_fs_rule *rrule;
u32 mbox_size;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4328,7 +4353,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
err = -EINVAL;
goto err_put_qp;
}
break;
case MLX4_NET_TRANS_RULE_ID_IB:
@@ -4339,7 +4364,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL;
goto err_put_qp;
}
vhcr->in_modifier +=
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
@@ -4347,7 +4372,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
default:
pr_err("Corrupted mailbox\n");
err = -EINVAL;
goto err_put_qp;
}
execute:
@@ -4356,23 +4381,69 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
goto err_put_qp;
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) {
mlx4_err(dev, "Fail to add flow steering resources\n");
goto err_detach;
}
err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
if (err)
goto err_detach;
mbox_size = qp_attach_mbox_size(inbox->buf);
rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
if (!rrule->mirr_mbox) {
err = -ENOMEM;
goto err_put_rule;
}
rrule->mirr_mbox_size = mbox_size;
rrule->mirr_rule_id = 0;
memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
/* set different port */
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
if (ctrl->port == 1)
ctrl->port = 2;
else
ctrl->port = 1;
if (mlx4_is_bonded(dev))
mlx4_do_mirror_rule(dev, rrule);
atomic_inc(&rqp->ref_count);
err_put_rule:
put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
err_detach:
/* detach rule on error */
if (err)
mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
err_put_qp:
put_res(dev, slave, qpn, RES_QP);
return err;
}
static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
int err;
err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
if (err) {
mlx4_err(dev, "Fail to remove flow steering resources\n");
return err;
}
mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
return 0;
}
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -4382,6 +4453,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct res_qp *rqp;
struct res_fs_rule *rrule;
u64 mirr_reg_id;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4390,12 +4462,30 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
if (err)
return err;
if (!rrule->mirr_mbox) {
mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
return -EINVAL;
}
mirr_reg_id = rrule->mirr_rule_id;
kfree(rrule->mirr_mbox);
/* Release the rule from busy state before removal */
put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
if (err)
return err;
if (mirr_reg_id && mlx4_is_bonded(dev)) {
err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
if (err) {
mlx4_err(dev, "Fail to get resource of mirror rule\n");
} else {
put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
mlx4_undo_mirror_rule(dev, rrule);
}
}
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) {
mlx4_err(dev, "Fail to remove flow steering resources\n");
@@ -4833,6 +4923,91 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
spin_unlock_irq(mlx4_tlock(dev));
}
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
struct res_fs_rule *mirr_rule;
u64 reg_id;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
if (!fs_rule->mirr_mbox) {
mlx4_err(dev, "rule mirroring mailbox is null\n");
return -EINVAL;
}
memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
goto err;
err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
if (err)
goto err_detach;
err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
if (err)
goto err_rem;
fs_rule->mirr_rule_id = reg_id;
mirr_rule->mirr_rule_id = 0;
mirr_rule->mirr_mbox_size = 0;
mirr_rule->mirr_mbox = NULL;
put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
return 0;
err_rem:
rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
err_detach:
mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
err:
return err;
}
static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker =
&priv->mfunc.master.res_tracker;
struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
struct rb_node *p;
struct res_fs_rule *fs_rule;
int err = 0;
LIST_HEAD(mirr_list);
for (p = rb_first(root); p; p = rb_next(p)) {
fs_rule = rb_entry(p, struct res_fs_rule, com.node);
if ((bond && fs_rule->mirr_mbox_size) ||
(!bond && !fs_rule->mirr_mbox_size))
list_add_tail(&fs_rule->mirr_list, &mirr_list);
}
list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
if (bond)
err += mlx4_do_mirror_rule(dev, fs_rule);
else
err += mlx4_undo_mirror_rule(dev, fs_rule);
}
return err;
}
int mlx4_bond_fs_rules(struct mlx4_dev *dev)
{
return mlx4_mirror_fs_rules(dev, true);
}
int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
{
return mlx4_mirror_fs_rules(dev, false);
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);


@@ -75,6 +75,11 @@ static inline int mlx4_is_bonded(struct mlx4_dev *dev)
return !!(dev->flags & MLX4_FLAG_BONDED);
}
static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
{
return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
}
struct mlx4_port_map {
u8 port1;
u8 port2;