bnxt_en: Fixed the VF link status after a link state change

The VF link state can be changed via the 'ip link set' command
(e.g. 'ip link set <pf> vf <n> state auto|enable|disable').  Currently,
the new link state does not take effect on the VF immediately.

The fix is for the PF to send a link change async event to the
designated VF after a VF link state change.  This async event will
trigger the VF to update the link status.

Signed-off-by: Eddie Wai <eddie.wai@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eddie Wai authored 2016-09-19 03:58:09 -04:00, committed by David S. Miller
commit 350a714960, parent ae8e98a6fa
1 changed file with 42 additions and 42 deletions
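
For context (an editorial sketch, not part of this patch): on the VF side, the
forwarded completion lands in the driver's existing async event handling in
bnxt.c, which flags the link state as stale and defers the firmware link query
to the slow-path work task. A simplified sketch of that receive path follows,
with unrelated event IDs and error handling omitted (identifiers taken from
bnxt.c/bnxt.h of this era; the real handler does more):

/* Abridged sketch of the VF-side async event handling in bnxt.c;
 * only the link-status-change case is shown.
 */
static int bnxt_async_event_process(struct bnxt *bp,
                                    struct hwrm_async_event_cmpl *cmpl)
{
        u16 event_id = le16_to_cpu(cmpl->event_id);

        switch (event_id) {
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                /* mark the link state as needing a re-read from firmware */
                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
                break;
        default:
                return 0;
        }
        /* the slow-path task performs the actual HWRM link query */
        schedule_work(&bp->sp_task);
        return 0;
}

The slow-path task then re-reads the link state from firmware
(bnxt_update_link()), so the administratively configured state is reflected
on the VF right away.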

@@ -19,6 +19,45 @@
 #include "bnxt_ethtool.h"
 
 #ifdef CONFIG_BNXT_SRIOV
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+                                          struct bnxt_vf_info *vf, u16 event_id)
+{
+        struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+        struct hwrm_fwd_async_event_cmpl_input req = {0};
+        struct hwrm_async_event_cmpl *async_cmpl;
+        int rc = 0;
+
+        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+        if (vf)
+                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+        else
+                /* broadcast this async event to all VFs */
+                req.encap_async_event_target_id = cpu_to_le16(0xffff);
+        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+        async_cmpl->type =
+                cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+        async_cmpl->event_id = cpu_to_le16(event_id);
+
+        mutex_lock(&bp->hwrm_cmd_lock);
+        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+        if (rc) {
+                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+                           rc);
+                goto fwd_async_event_cmpl_exit;
+        }
+
+        if (resp->error_code) {
+                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+                           resp->error_code);
+                rc = -1;
+        }
+
+fwd_async_event_cmpl_exit:
+        mutex_unlock(&bp->hwrm_cmd_lock);
+        return rc;
+}
+
 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
 {
         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
@@ -243,8 +282,9 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
                 rc = -EINVAL;
                 break;
         }
-        /* CHIMP TODO: send msg to VF to update new link state */
-
+        if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+                rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
+                        HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
         return rc;
 }
 
@@ -525,46 +565,6 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
         return rc;
 }
 
-static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
-                                          struct bnxt_vf_info *vf,
-                                          u16 event_id)
-{
-        int rc = 0;
-        struct hwrm_fwd_async_event_cmpl_input req = {0};
-        struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
-        struct hwrm_async_event_cmpl *async_cmpl;
-
-        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
-        if (vf)
-                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
-        else
-                /* broadcast this async event to all VFs */
-                req.encap_async_event_target_id = cpu_to_le16(0xffff);
-        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
-        async_cmpl->type =
-                cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
-        async_cmpl->event_id = cpu_to_le16(event_id);
-
-        mutex_lock(&bp->hwrm_cmd_lock);
-        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
-        if (rc) {
-                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
-                           rc);
-                goto fwd_async_event_cmpl_exit;
-        }
-
-        if (resp->error_code) {
-                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
-                           resp->error_code);
-                rc = -1;
-        }
-
-fwd_async_event_cmpl_exit:
-        mutex_unlock(&bp->hwrm_cmd_lock);
-        return rc;
-}
-
 void bnxt_sriov_disable(struct bnxt *bp)
 {
         u16 num_vfs = pci_num_vf(bp->pdev);