// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
/* Intel(R) Ethernet Connection E800 Series Linux Driver */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ice.h"
#define DRV_VERSION "ice-0.7.0-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;
static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static int ice_vsi_release(struct ice_vsi *vsi);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);
/**
* ice_get_free_slot - get the next free (NULL) location index in array
* @array: array to search
* @size: size of the array
* @curr: last known occupied index to be used as a search hint
*
* void * is being used to keep the functionality generic. This lets us use this
* function on any array of pointers.
*/
static int ice_get_free_slot(void *array, int size, int curr)
{
int **tmp_array = (int **)array;
int next;
if (curr < (size - 1) && !tmp_array[curr + 1]) {
next = curr + 1;
} else {
int i = 0;
while ((i < size) && (tmp_array[i]))
i++;
if (i == size)
next = ICE_NO_VSI;
else
next = i;
}
return next;
}
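/* Illustrative example (hypothetical values, not from the driver): with
* size = 4 and array = { A, B, NULL, C }, a call with curr = 1 takes the
* fast path (the slot right after the hint is free) and returns 2. With a
* fully occupied array, the fallback linear scan runs off the end and
* ICE_NO_VSI is returned.
*/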
/**
* ice_search_res - Search the tracker for a block of resources
* @res: pointer to the resource
* @needed: size of the block needed
* @id: identifier to track owner
* Returns the base item index of the block, or -ENOMEM for error
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
int start = res->search_hint;
int end = start;
id |= ICE_RES_VALID_BIT;
do {
/* skip already allocated entries */
if (res->list[end++] & ICE_RES_VALID_BIT) {
start = end;
if ((start + needed) > res->num_entries)
break;
}
if (end == (start + needed)) {
int i = start;
/* there was enough, so assign it to the requestor */
while (i != end)
res->list[i++] = id;
if (end == res->num_entries)
end = 0;
res->search_hint = end;
return start;
}
} while (1);
return -ENOMEM;
}
/**
* ice_get_res - get a block of resources
* @pf: board private structure
* @res: pointer to the resource
* @needed: size of the block needed
* @id: identifier to track owner
*
* Returns the base item index of the block, or -ENOMEM for error
* The search_hint trick and lack of advanced fit-finding only work
* because we're highly likely to have all the same sized requests.
* Linear search time and any fragmentation should be minimal.
*/
static int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
int ret;
if (!res || !pf)
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
dev_err(&pf->pdev->dev,
"param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
}
/* search based on search_hint */
ret = ice_search_res(res, needed, id);
if (ret < 0) {
/* previous search failed. Reset search hint and try again */
res->search_hint = 0;
ret = ice_search_res(res, needed, id);
}
return ret;
}
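/* Illustrative example (hypothetical values): with num_entries = 8,
* search_hint = 0 and an empty tracker, a first ice_get_res() call for
* needed = 4 marks list[0..3] with the owner id and advances search_hint
* to 4; a second identical call is then satisfied from list[4..7] without
* rescanning the front of the tracker. Only when the forward search fails
* is search_hint reset to 0 and the search retried from the start.
*/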
/**
* ice_free_res - free a block of resources
* @res: pointer to the resource
* @index: starting index previously returned by ice_get_res
* @id: identifier to track owner
* Returns number of resources freed
*/
static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
int count = 0;
int i;
if (!res || index >= res->num_entries)
return -EINVAL;
id |= ICE_RES_VALID_BIT;
for (i = index; i < res->num_entries && res->list[i] == id; i++) {
res->list[i] = 0;
count++;
}
return count;
}
/**
* ice_add_mac_to_list - Add a MAC address filter entry to the list
* @vsi: the VSI to be forwarded to
* @add_list: pointer to the list which contains MAC filter entries
* @macaddr: the MAC address to be added.
*
* Adds MAC address filter entry to the temp list
*
* Returns 0 on success or -ENOMEM on failure.
*/
static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
const u8 *macaddr)
{
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
if (!tmp)
return -ENOMEM;
tmp->fltr_info.flag = ICE_FLTR_TX;
tmp->fltr_info.src = vsi->vsi_num;
tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
INIT_LIST_HEAD(&tmp->list_entry);
list_add(&tmp->list_entry, add_list);
return 0;
}
/**
* ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
* @netdev: the net device on which the sync is happening
* @addr: MAC address to sync
*
* This is a callback function which is called by the in kernel device sync
* functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
* populates the tmp_sync_list, which is later used by ice_add_mac to add the
* MAC filters to the hardware.
*/
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
return -EINVAL;
return 0;
}
/**
* ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
* @netdev: the net device on which the unsync is happening
* @addr: MAC address to unsync
*
* This is a callback function which is called by the in kernel device unsync
* functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
* populates the tmp_unsync_list, which is later used by ice_remove_mac to
* delete the MAC filters from the hardware.
*/
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
return -EINVAL;
return 0;
}
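/* Flow summary (derived from the code below): ice_vsi_sync_fltr() takes
* the netdev's address list lock and calls __dev_uc_sync()/__dev_mc_sync(),
* which invoke the two callbacks above once per changed address. The
* callbacks only queue entries on vsi->tmp_sync_list/tmp_unsync_list; the
* actual hardware programming happens afterwards, outside the lock, via
* ice_add_mac()/ice_remove_mac().
*/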
/**
* ice_free_fltr_list - free filter lists helper
* @dev: pointer to the device struct
* @h: pointer to the list head to be freed
*
* Helper function to free filter lists previously created using
* ice_add_mac_to_list
*/
static void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
struct ice_fltr_list_entry *e, *tmp;
list_for_each_entry_safe(e, tmp, h, list_entry) {
list_del(&e->list_entry);
devm_kfree(dev, e);
}
}
/**
* ice_vsi_fltr_changed - check if filter state changed
* @vsi: VSI to be checked
*
* returns true if filter state has changed, false otherwise.
*/
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}
/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
* Push any outstanding VSI filter changes through the AdminQ.
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
struct device *dev = &vsi->back->pdev->dev;
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status = 0;
u32 changed_flags = 0;
int err = 0;
if (!vsi->netdev)
return -EINVAL;
while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
usleep_range(1000, 2000);
changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
vsi->current_netdev_flags = vsi->netdev->flags;
INIT_LIST_HEAD(&vsi->tmp_sync_list);
INIT_LIST_HEAD(&vsi->tmp_unsync_list);
if (ice_vsi_fltr_changed(vsi)) {
clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
/* grab the netdev's addr_list_lock */
netif_addr_lock_bh(netdev);
__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
ice_add_mac_to_unsync_list);
__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
ice_add_mac_to_unsync_list);
/* our temp lists are populated. release lock */
netif_addr_unlock_bh(netdev);
}
/* Remove MAC addresses in the unsync list */
status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
if (status) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
if (status == ICE_ERR_NO_MEMORY) {
err = -ENOMEM;
goto out;
}
}
/* Add MAC addresses in the sync list */
status = ice_add_mac(hw, &vsi->tmp_sync_list);
ice_free_fltr_list(dev, &vsi->tmp_sync_list);
if (status) {
netdev_err(netdev, "Failed to add MAC filters\n");
/* If there is no more space for new umac filters, the VSI
* should go into promiscuous mode. There should be some
* space reserved for promiscuous filters.
*/
if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
!test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
netdev_warn(netdev,
"Reached MAC filter limit, forcing promisc mode on VSI %d\n",
vsi->vsi_num);
} else {
err = -EIO;
goto out;
}
}
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI)
netdev_warn(netdev, "Unsupported configuration\n");
if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply TX filter rule to get traffic from VMs */
status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
ICE_FLTR_TX);
if (status) {
netdev_err(netdev, "Error setting default VSI %i tx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_PROMISC;
err = -EIO;
goto out_promisc;
}
/* Apply RX filter rule to get traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error setting default VSI %i rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_PROMISC;
err = -EIO;
goto out_promisc;
}
} else {
/* Clear TX filter rule to stop traffic from VMs */
status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
ICE_FLTR_TX);
if (status) {
netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags |= IFF_PROMISC;
err = -EIO;
goto out_promisc;
}
/* Clear filter RX to remove traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags |= IFF_PROMISC;
err = -EIO;
goto out_promisc;
}
}
}
goto exit;
out_promisc:
set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
goto exit;
out:
/* if something went wrong then set the changed flag so we try again */
set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
clear_bit(__ICE_CFG_BUSY, vsi->state);
return err;
}
/**
* ice_sync_fltr_subtask - Sync the VSI filter list with HW
* @pf: board private structure
*/
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
int v;
if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
return;
clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
for (v = 0; v < pf->num_alloc_vsi; v++)
if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
ice_vsi_sync_fltr(pf->vsi[v])) {
/* come back and try again later */
set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
break;
}
}
/**
* ice_is_reset_recovery_pending - check if a reset recovery is pending
* @state: pf state field
*/
static bool ice_is_reset_recovery_pending(unsigned long *state)
{
return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
}
/**
* ice_prepare_for_reset - prep for the core to reset
* @pf: board private structure
*
* Inform or close all dependent features in prep for reset.
*/
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
u32 v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num);
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* disable the VSIs and their queues that are not already DOWN */
/* pf_dis_all_vsi modifies netdev structures - rtnl_lock needed */
ice_pf_dis_all_vsi(pf);
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
pf->vsi[v]->vsi_num = 0;
ice_shutdown_all_ctrlq(hw);
}
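/* Background (from the original patch description): the hardware supports
* a hierarchy of resets. EMPR resets the manageability block plus the data
* path, PHY and link for all PFs; GLOBR is EMPR minus the manageability
* block; CORER is GLOBR minus PHY/link; PFR is effectively a CORER scoped
* to a single PF. Only the issuing PF is affected by a PFR, so no
* miscellaneous (OICR) interrupt fires for it; that is why ice_do_reset()
* below handles the PFR prepare/reset/rebuild sequence itself.
*/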
/**
* ice_do_reset - Initiate one of many types of resets
* @pf: board private structure
* @reset_type: reset type requested before this function was called
*/
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
WARN_ON(in_interrupt());
/* PFR is a bit of a special case because it doesn't result in an OICR
* interrupt. So for PFR, we prepare for reset, issue the reset and
* rebuild sequentially.
*/
if (reset_type == ICE_RESET_PFR) {
set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
ice_prepare_for_reset(pf);
}
/* trigger the reset */
if (ice_reset(hw, reset_type)) {
dev_err(dev, "reset %d failed\n", reset_type);
set_bit(__ICE_RESET_FAILED, pf->state);
clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
return;
}
if (reset_type == ICE_RESET_PFR) {
pf->pfr_count++;
ice_rebuild(pf);
clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
}
}
/**
* ice_reset_subtask - Set up for resetting the device and driver
* @pf: board private structure
*/
static void ice_reset_subtask(struct ice_pf *pf)
{
enum ice_reset_req reset_type;
rtnl_lock();
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
* OICR interrupt. The OICR handler (ice_misc_intr) determines what
* type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in
* pf->state. So if reset/recovery is pending (as indicated by this bit)
* we do a rebuild and return.
*/
if (ice_is_reset_recovery_pending(pf->state)) {
clear_bit(__ICE_GLOBR_RECV, pf->state);
clear_bit(__ICE_CORER_RECV, pf->state);
ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw))
set_bit(__ICE_RESET_FAILED, pf->state);
else
ice_rebuild(pf);
clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
goto unlock;
}
/* No pending resets to finish processing. Check for new resets */
if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
reset_type = ICE_RESET_GLOBR;
else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
reset_type = ICE_RESET_CORER;
else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
reset_type = ICE_RESET_PFR;
else
goto unlock;
/* reset if not already down or resetting */
if (!test_bit(__ICE_DOWN, pf->state) &&
!test_bit(__ICE_CFG_BUSY, pf->state)) {
ice_do_reset(pf, reset_type);
}
unlock:
rtnl_unlock();
}
/**
* ice_watchdog_subtask - periodic tasks not using event driven scheduling
* @pf: board private structure
*/
static void ice_watchdog_subtask(struct ice_pf *pf)
{
int i;
/* if interface is down do nothing */
if (test_bit(__ICE_DOWN, pf->state) ||
test_bit(__ICE_CFG_BUSY, pf->state))
return;
/* make sure we don't do these things too often */
if (time_before(jiffies,
pf->serv_tmr_prev + pf->serv_tmr_period))
return;
pf->serv_tmr_prev = jiffies;
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
ice_update_pf_stats(pf);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->netdev)
ice_update_vsi_stats(pf->vsi[i]);
}
/**
* ice_print_link_msg - print link up or down message
* @vsi: the VSI whose link status is being queried
* @isup: true if the link is now up, false if it is down
*/
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
const char *speed;
const char *fc;
if (vsi->current_isup == isup)
return;
vsi->current_isup = isup;
if (!isup) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
return;
}
switch (vsi->port_info->phy.link_info.link_speed) {
case ICE_AQ_LINK_SPEED_40GB:
speed = "40 G";
break;
case ICE_AQ_LINK_SPEED_25GB:
speed = "25 G";
break;
case ICE_AQ_LINK_SPEED_20GB:
speed = "20 G";
break;
case ICE_AQ_LINK_SPEED_10GB:
speed = "10 G";
break;
case ICE_AQ_LINK_SPEED_5GB:
speed = "5 G";
break;
case ICE_AQ_LINK_SPEED_2500MB:
speed = "2.5 G";
break;
case ICE_AQ_LINK_SPEED_1000MB:
speed = "1 G";
break;
case ICE_AQ_LINK_SPEED_100MB:
speed = "100 M";
break;
default:
speed = "Unknown";
break;
}
switch (vsi->port_info->fc.current_mode) {
case ICE_FC_FULL:
fc = "RX/TX";
break;
case ICE_FC_TX_PAUSE:
fc = "TX";
break;
case ICE_FC_RX_PAUSE:
fc = "RX";
break;
default:
fc = "Unknown";
break;
}
netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
speed, fc);
}
/**
* ice_init_link_events - enable/initialize link events
* @pi: pointer to the port_info instance
*
* Returns -EIO on failure, 0 on success
*/
static int ice_init_link_events(struct ice_port_info *pi)
{
u16 mask;
mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
dev_dbg(ice_hw_to_dev(pi->hw),
"Failed to set link event mask for port %d\n",
pi->lport);
return -EIO;
}
if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
dev_dbg(ice_hw_to_dev(pi->hw),
"Failed to enable link events for port %d\n",
pi->lport);
return -EIO;
}
return 0;
}
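/* Note on the mask semantics (as the computation above suggests): bits set
* in the event mask suppress the corresponding link events, so inverting
* the OR of the wanted bits (UPDOWN, MEDIA_NA, MODULE_QUAL_FAIL) leaves
* exactly those three events enabled. The follow-up ice_aq_get_link_info()
* call with its second argument (enable link status events) set to true
* then turns on link status event reporting.
*/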
/**
* ice_vsi_link_event - update the VSI's netdev in response to a link event
* @vsi: the VSI on which the link event occurred
* @link_up: whether the link is now up or down
*/
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
if (!vsi || test_bit(__ICE_DOWN, vsi->state))
return;
if (vsi->type == ICE_VSI_PF) {
if (!vsi->netdev) {
dev_dbg(&vsi->back->pdev->dev,
"vsi->netdev is not initialized!\n");
return;
}
if (link_up) {
netif_carrier_on(vsi->netdev);
netif_tx_wake_all_queues(vsi->netdev);
} else {
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
}
}
}
/**
* ice_link_event - process the link event
* @pf: pf that the link event is associated with
* @pi: port_info for the port that the link event is associated with
*
* Returns -EIO if ice_get_link_status() fails
* Returns 0 on success
*/
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
{
u8 new_link_speed, old_link_speed;
struct ice_phy_info *phy_info;
bool new_link_same_as_old;
bool new_link, old_link;
u8 lport;
u16 v;
phy_info = &pi->phy;
phy_info->link_info_old = phy_info->link_info;
/* Force ice_get_link_status() to update link info */
phy_info->get_link_info = true;
old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
old_link_speed = phy_info->link_info_old.link_speed;
lport = pi->lport;
if (ice_get_link_status(pi, &new_link)) {
dev_dbg(&pf->pdev->dev,
"Could not get link status for port %d\n", lport);
return -EIO;
}
new_link_speed = phy_info->link_info.link_speed;
new_link_same_as_old = (new_link == old_link &&
new_link_speed == old_link_speed);
ice_for_each_vsi(pf, v) {
struct ice_vsi *vsi = pf->vsi[v];
if (!vsi || !vsi->port_info)
continue;
if (new_link_same_as_old &&
(test_bit(__ICE_DOWN, vsi->state) ||
new_link == netif_carrier_ok(vsi->netdev)))
continue;
if (vsi->port_info->lport == lport) {
ice_print_link_msg(vsi, new_link);
ice_vsi_link_event(vsi, new_link);
}
}
return 0;
}
/**
* ice_handle_link_event - handle link event via ARQ
* @pf: pf that the link event is associated with
*
* Returns -EINVAL if port_info is NULL
* Returns the link event processing status otherwise
*/
static int ice_handle_link_event(struct ice_pf *pf)
{
struct ice_port_info *port_info;
int status;
port_info = pf->hw.port_info;
if (!port_info)
return -EINVAL;
status = ice_link_event(pf, port_info);
if (status)
dev_dbg(&pf->pdev->dev,
"Could not process link event, error %d\n", status);
return status;
}
/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
* @q_type: specific Control queue type
*/
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
struct ice_ctl_q_info *cq;
u16 pending, i = 0;
const char *qtype;
u32 oldval, val;
/* Do not clean control queue if/when PF reset fails */
if (test_bit(__ICE_RESET_FAILED, pf->state))
return 0;
switch (q_type) {
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
qtype = "Admin";
break;
default:
dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
q_type);
return 0;
}
/* check for error indications - PF_xx_AxQLEN register layout for
* FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
*/
val = rd32(hw, cq->rq.len);
if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M)) {
oldval = val;
if (val & PF_FW_ARQLEN_ARQVFE_M)
dev_dbg(&pf->pdev->dev,
"%s Receive Queue VF Error detected\n", qtype);
if (val & PF_FW_ARQLEN_ARQOVFL_M) {
dev_dbg(&pf->pdev->dev,
"%s Receive Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ARQLEN_ARQCRIT_M)
dev_dbg(&pf->pdev->dev,
"%s Receive Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M);
if (oldval != val)
wr32(hw, cq->rq.len, val);
}
val = rd32(hw, cq->sq.len);
if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
PF_FW_ATQLEN_ATQCRIT_M)) {
oldval = val;
if (val & PF_FW_ATQLEN_ATQVFE_M)
dev_dbg(&pf->pdev->dev,
"%s Send Queue VF Error detected\n", qtype);
if (val & PF_FW_ATQLEN_ATQOVFL_M) {
dev_dbg(&pf->pdev->dev,
"%s Send Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ATQLEN_ATQCRIT_M)
dev_dbg(&pf->pdev->dev,
"%s Send Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
PF_FW_ATQLEN_ATQCRIT_M);
if (oldval != val)
wr32(hw, cq->sq.len, val);
}
event.buf_len = cq->rq_buf_size;
event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
GFP_KERNEL);
if (!event.msg_buf)
return 0;
do {
enum ice_status ret;
u16 opcode;
ret = ice_clean_rq_elem(hw, cq, &event, &pending);
if (ret == ICE_ERR_AQ_NO_WORK)
break;
if (ret) {
dev_err(&pf->pdev->dev,
"%s Receive Queue event error %d\n", qtype,
ret);
break;
}
opcode = le16_to_cpu(event.desc.opcode);
switch (opcode) {
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf))
dev_err(&pf->pdev->dev,
"Could not handle link event");
break;
default:
dev_dbg(&pf->pdev->dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
break;
}
} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
devm_kfree(&pf->pdev->dev, event.msg_buf);
return pending && (i == ICE_DFLT_IRQ_WORK);
}
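/* Note: the nonzero return above (work still pending after
* ICE_DFLT_IRQ_WORK iterations) is what makes ice_clean_adminq_subtask()
* below bail out early and leave __ICE_ADMINQ_EVENT_PENDING set, so the
* service task comes back for the remainder on its next run.
*/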
/**
* ice_ctrlq_pending - check if there is a difference between ntc and ntu
* @hw: pointer to hardware info
* @cq: control queue information
*
* returns true if there are pending messages in a queue, false if there aren't
*/
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
u16 ntu;
ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
return cq->rq.next_to_clean != ntu;
}
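/* Illustrative example (hypothetical values): if next_to_clean is 3 while
* the head register reads 5, descriptors 3 and 4 hold firmware messages
* that have not been processed yet, so pending work is reported. Once
* next_to_clean has caught up with the hardware head, the queue is idle.
*/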
/**
* ice_clean_adminq_subtask - clean the AdminQ rings
* @pf: board private structure
*/
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
return;
if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
return;
clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
/* There might be a situation where new messages arrive to a control
* queue between processing the last message and clearing the
* EVENT_PENDING bit. So before exiting, check queue head again (using
* ice_ctrlq_pending) and process new messages if any.
*/
if (ice_ctrlq_pending(hw, &hw->adminq))
__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
ice_flush(hw);
}
/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
*
* If not already scheduled, this puts the task into the work queue.
*/
static void ice_service_task_schedule(struct ice_pf *pf)
{
if (!test_bit(__ICE_DOWN, pf->state) &&
!test_and_set_bit(__ICE_SERVICE_SCHED, pf->state))
queue_work(ice_wq, &pf->serv_task);
}
/**
* ice_service_task_complete - finish up the service task
* @pf: board private structure
*/
static void ice_service_task_complete(struct ice_pf *pf)
{
WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
/* force memory (pf->state) to sync before next service task */
smp_mb__before_atomic();
clear_bit(__ICE_SERVICE_SCHED, pf->state);
}
/**
* ice_service_timer - timer callback to schedule service task
* @t: pointer to timer_list
*/
static void ice_service_timer(struct timer_list *t)
{
struct ice_pf *pf = from_timer(pf, t, serv_tmr);
mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
ice_service_task_schedule(pf);
}
/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
*/
static void ice_service_task(struct work_struct *work)
{
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
unsigned long start_time = jiffies;
/* subtasks */
/* process reset requests first */
ice_reset_subtask(pf);
/* bail if a reset/recovery cycle is pending */
if (ice_is_reset_recovery_pending(pf->state) ||
test_bit(__ICE_SUSPENDED, pf->state)) {
ice_service_task_complete(pf);
return;
}
ice_sync_fltr_subtask(pf);
ice_watchdog_subtask(pf);
ice_clean_adminq_subtask(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
/* If the tasks have taken longer than one service timer period
* or there is more work to be done, reset the service timer to
* schedule the service task now.
*/
if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
/**
* ice_set_ctrlq_len - helper function to set controlq length
* @hw: pointer to the hw instance
*/
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
hw->adminq.num_rq_entries = ICE_AQ_LEN;
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
}
/**
* ice_irq_affinity_notify - Callback for affinity changes
* @notify: context as to what irq was changed
* @mask: the new affinity mask
*
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks.
*/
static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct ice_q_vector *q_vector =
container_of(notify, struct ice_q_vector, affinity_notify);
cpumask_copy(&q_vector->affinity_mask, mask);
}
/**
* ice_irq_affinity_release - Callback for affinity notifier release
* @ref: internal core kernel usage
*
* This is a callback function used by the irq_set_affinity_notifier function
* to inform the current notification subscriber that they will no longer
* receive notifications.
*/
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
/**
* ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
* @vsi: the VSI being un-configured
*/
static void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
int base = vsi->base_vector;
u32 val;
int i;
/* disable interrupt causation from each queue */
if (vsi->tx_rings) {
ice_for_each_txq(vsi, i) {
if (vsi->tx_rings[i]) {
u16 reg;
reg = vsi->tx_rings[i]->reg_idx;
val = rd32(hw, QINT_TQCTL(reg));
val &= ~QINT_TQCTL_CAUSE_ENA_M;
wr32(hw, QINT_TQCTL(reg), val);
}
}
}
if (vsi->rx_rings) {
ice_for_each_rxq(vsi, i) {
if (vsi->rx_rings[i]) {
u16 reg;
reg = vsi->rx_rings[i]->reg_idx;
val = rd32(hw, QINT_RQCTL(reg));
val &= ~QINT_RQCTL_CAUSE_ENA_M;
wr32(hw, QINT_RQCTL(reg), val);
}
}
}
/* disable each interrupt */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
for (i = vsi->base_vector;
i < (vsi->num_q_vectors + vsi->base_vector); i++)
wr32(hw, GLINT_DYN_CTL(i), 0);
ice_flush(hw);
for (i = 0; i < vsi->num_q_vectors; i++)
synchronize_irq(pf->msix_entries[i + base].vector);
}
}
/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
int i;
for (i = 0; i < vsi->num_q_vectors; i++)
ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
}
ice_flush(hw);
return 0;
}
/**
* ice_vsi_delete - delete a VSI from the switch
* @vsi: pointer to VSI being removed
*/
static void ice_vsi_delete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx ctxt;
enum ice_status status;
ctxt.vsi_num = vsi->vsi_num;
memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
if (status)
dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
vsi->vsi_num);
}
/**
* ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
* @vsi: the VSI being configured
* @basename: name for the vector
*/
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
int q_vectors = vsi->num_q_vectors;
struct ice_pf *pf = vsi->back;
int base = vsi->base_vector;
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
int irq_num;
for (vector = 0; vector < q_vectors; vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[vector];
irq_num = pf->msix_entries[base + vector].vector;
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++;
} else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++);
} else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++);
} else {
/* skip this unused q_vector */
continue;
}
err = devm_request_irq(&pf->pdev->dev,
pf->msix_entries[base + vector].vector,
vsi->irq_handler, 0, q_vector->name,
q_vector);
if (err) {
netdev_err(vsi->netdev,
"MSIX request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
/* register for affinity change notifications */
q_vector->affinity_notify.notify = ice_irq_affinity_notify;
q_vector->affinity_notify.release = ice_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* assign the mask for this irq */
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
vsi->irqs_ready = true;
return 0;
free_q_irqs:
while (vector) {
vector--;
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
}
return err;
}
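/* Illustrative example (hypothetical names): for a basename such as
* "ice-eth0", a q_vector serving both a Tx and an Rx ring gets an IRQ name
* like "ice-eth0-TxRx-0" in /proc/interrupts. In that combined case,
* rx_int_idx supplies the shared index while tx_int_idx is still
* incremented so the Tx-only numbering stays in step.
*/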
/**
* ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
* @vsi: the VSI being configured
*/
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
struct ice_hw_common_caps *cap;
struct ice_pf *pf = vsi->back;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->rss_size = 1;
return;
}
cap = &pf->hw.func_caps.common_cap;
switch (vsi->type) {
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = cap->rss_table_size;
vsi->rss_size = min_t(int, num_online_cpus(),
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
break;
}
}
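/* Illustrative example (hypothetical values): on a 16-CPU system with a
* hardware rss_table_entry_width of 7, the PF VSI gets
* rss_size = min(16, BIT(7)) = min(16, 128) = 16, i.e. the online CPU
* count is the limiting factor; with RSS disabled, rss_size is pinned to 1
* by the early return above.
*/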
/**
* ice_vsi_setup_q_map - Setup a VSI queue map
* @vsi: the VSI being configured
* @ctxt: VSI context structure
*/
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
u16 offset = 0, qmap = 0, numq_tc;
u16 pow = 0, max_rss = 0, qcount;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
bool ena_tc0 = false;
int i;
/* at least TC0 should be enabled by default */
if (vsi->tc_cfg.numtc) {
if (!(vsi->tc_cfg.ena_tc & BIT(0)))
ena_tc0 = true;
} else {
ena_tc0 = true;
}
if (ena_tc0) {
vsi->tc_cfg.numtc++;
vsi->tc_cfg.ena_tc |= 1;
}
numq_tc = qcount_rx / vsi->tc_cfg.numtc;
/* TC mapping is a function of the number of Rx queues assigned to the
* VSI for each traffic class and the offset of these queues.
 * The first 11 bits are the queue offset for TC0, and the next 4 bits
 * are the number of queues allocated to TC0. The number of queues is
 * a power of 2.
 *
 * If a TC is not enabled, its queue offset is set to 0 and a single
 * queue is allocated; this way, traffic for that TC is sent to the
 * default queue.
*
* Setup number and offset of Rx queues for all TCs for the VSI
*/
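/* A hedged worked example of the qmap encoding built below, assuming
 * the offset field occupies the low bits (ICE_AQ_VSI_TC_Q_OFFSET_S == 0)
 * and the queue-count field starts at bit 11 (ICE_AQ_VSI_TC_Q_NUM_S);
 * the shift values here are illustrative:
 *
 *   offset = 0, qcount = 8  =>  pow = order_base_2(8) = 3
 *   qmap   = (0 << 0) | (3 << 11) = 0x1800
 *
 * i.e. TC0 starts at queue 0 and owns 2^3 = 8 queues; the next enabled
 * TC would then start at offset 8.
 */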
/* qcount will change if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
if (vsi->type == ICE_VSI_PF)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_SMALL_RSS_QS;
qcount = min_t(int, numq_tc, max_rss);
qcount = min_t(int, qcount, vsi->rss_size);
} else {
qcount = numq_tc;
}
/* find the base-2 order (rounded-up log2) of qcount */
pow = order_base_2(qcount);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
vsi->tc_cfg.tc_info[i].qcount = 1;
ctxt->info.tc_mapping[i] = 0;
continue;
}
/* TC is enabled */
vsi->tc_cfg.tc_info[i].qoffset = offset;
vsi->tc_cfg.tc_info[i].qcount = qcount;
qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
ICE_AQ_VSI_TC_Q_OFFSET_M) |
((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
ICE_AQ_VSI_TC_Q_NUM_M);
offset += qcount;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
vsi->num_txq = qcount_tx;
vsi->num_rxq = offset;
/* Rx queue mapping */
ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
/* q_mapping buffer holds the info for the first queue allocated for
* this VSI in the PF space and also the number of queues associated
* with this VSI.
*/
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
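/* Illustrative example of the contiguous Rx mapping written above:
 * with vsi->rxq_map[0] == 16 and vsi->num_rxq == 8, firmware sees
 * q_mapping = { 16, 8 }, i.e. this VSI owns PF queues 16-23.
 */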
}
/**
* ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
* @ctxt: the VSI context being set
*
* This initializes a default VSI context for all sections except the Queues.
*/
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
u32 table = 0;
memset(&ctxt->info, 0, sizeof(ctxt->info));
/* VSIs should be allocated from the shared pool */
ctxt->alloc_from_pool = true;
/* Src pruning enabled by default */
ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
/* By default, bits 3 and 4 in port_vlan_flags are 0, which results in
 * legacy behavior (show VLAN, DEI, and UP) in the descriptor. Also,
 * allow all packets, whether untagged or tagged.
*/
ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
ICE_AQ_VSI_PVLAN_MODE_M) >>
ICE_AQ_VSI_PVLAN_MODE_S);
/* Have 1:1 UP mapping for both ingress/egress tables */
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
table |= ICE_UP_TABLE_TRANSLATE(1, 1);
table |= ICE_UP_TABLE_TRANSLATE(2, 2);
table |= ICE_UP_TABLE_TRANSLATE(3, 3);
table |= ICE_UP_TABLE_TRANSLATE(4, 4);
table |= ICE_UP_TABLE_TRANSLATE(5, 5);
table |= ICE_UP_TABLE_TRANSLATE(6, 6);
table |= ICE_UP_TABLE_TRANSLATE(7, 7);
ctxt->info.ingress_table = cpu_to_le32(table);
ctxt->info.egress_table = cpu_to_le32(table);
/* Have 1:1 UP mapping for outer to inner UP table */
ctxt->info.outer_up_table = cpu_to_le32(table);
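/* Illustrative only: assuming each UP field is 3 bits wide and UP0
 * starts at bit 0 (as the ICE_UP_TABLE_TRANSLATE() usage suggests),
 * the 1:1 mapping built above packs to:
 *
 *   table = 0 | (1 << 3) | (2 << 6) | ... | (7 << 21) = 0xFAC688
 *
 * so each user priority translates to itself in both directions.
 */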
/* No outer tag support; outer_tag_flags remains zero */
}
/**
* ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
* @ctxt: the VSI context being set
* @vsi: the VSI being configured
*/
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
u8 lut_type, hash_type;
switch (vsi->type) {
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
default:
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
return;
}
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
/**
* ice_vsi_add - Create a new VSI or fetch preallocated VSI
* @vsi: the VSI being configured
*
* This initializes a VSI context depending on the VSI type to be added and
* passes it down to the add_vsi aq command to create a new VSI.
*/
static int ice_vsi_add(struct ice_vsi *vsi)
{
struct ice_vsi_ctx ctxt = { 0 };
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
int ret = 0;
switch (vsi->type) {
case ICE_VSI_PF:
ctxt.flags = ICE_AQ_VSI_TYPE_PF;
break;
default:
return -ENODEV;
}
ice_set_dflt_vsi_ctx(&ctxt);
/* if the switch is in VEB mode, allow VSI loopback */
if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
/* Set LUT type and HASH type if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_set_rss_vsi_ctx(&ctxt, vsi);
ctxt.info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, &ctxt);
ret = ice_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_err(&vsi->back->pdev->dev,
"Add VSI AQ call failed, err %d\n", ret);
return -EIO;
}
vsi->info = ctxt.info;
vsi->vsi_num = ctxt.vsi_num;
return ret;
}
/**
* ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
* @vsi: the VSI being cleaned up
*/
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
u16 vector = vsi->base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0;
u32 rxq = 0;
int i, q;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
txq++;
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
rxq++;
}
}
ice_flush(hw);
}
/**
* ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
* @vsi: the VSI having rings deallocated
*/
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
int i;
if (vsi->tx_rings) {
for (i = 0; i < vsi->alloc_txq; i++) {
if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
}
}
}
if (vsi->rx_rings) {
for (i = 0; i < vsi->alloc_rxq; i++) {
if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
vsi->rx_rings[i] = NULL;
}
}
}
}
/**
* ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
* @vsi: VSI which is having rings allocated
*/
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int i;
/* Allocate tx_rings */
for (i = 0; i < vsi->alloc_txq; i++) {
struct ice_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
goto err_out;
ring->q_index = i;
ring->reg_idx = vsi->txq_map[i];
ring->ring_active = false;
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
ring->count = vsi->num_desc;
vsi->tx_rings[i] = ring;
}
/* Allocate rx_rings */
for (i = 0; i < vsi->alloc_rxq; i++) {
struct ice_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
goto err_out;
ring->q_index = i;
ring->reg_idx = vsi->rxq_map[i];
ring->ring_active = false;
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
ring->count = vsi->num_desc;
vsi->rx_rings[i] = ring;
}
return 0;
err_out:
ice_vsi_clear_rings(vsi);
return -ENOMEM;
}
/**
* ice_vsi_free_irq - Free the irq association with the OS
* @vsi: the VSI being configured
*/
static void ice_vsi_free_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int base = vsi->base_vector;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
int i;
if (!vsi->q_vectors || !vsi->irqs_ready)
return;
vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
u16 vector = i + base;
int irq_num;
irq_num = pf->msix_entries[vector].vector;
/* free only the irqs that were actually requested */
if (!vsi->q_vectors[i] ||
!(vsi->q_vectors[i]->num_ring_tx ||
vsi->q_vectors[i]->num_ring_rx))
continue;
/* clear the affinity notifier in the IRQ descriptor */
irq_set_affinity_notifier(irq_num, NULL);
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(&pf->pdev->dev, irq_num,
vsi->q_vectors[i]);
}
ice_vsi_release_msix(vsi);
}
}
/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
*/
static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
u16 vector = vsi->base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0, rxq = 0;
int i, q, itr;
u8 itr_gran;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
itr_gran = hw->itr_gran_200;
if (q_vector->num_ring_rx) {
q_vector->rx.itr =
ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
itr_gran);
q_vector->rx.latency_range = ICE_LOW_LATENCY;
}
if (q_vector->num_ring_tx) {
q_vector->tx.itr =
ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
itr_gran);
q_vector->tx.latency_range = ICE_LOW_LATENCY;
}
wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
/* Both the Transmit Queue Interrupt Cause Control register
 * and the Receive Queue Interrupt Cause Control register
 * expect the MSIX_INDX field to be the vector index
 * within the function space and not the absolute
 * vector index across the PF or across the device.
 * For SR-IOV VF VSIs, the queue vector index always starts
 * at 1, since vector index 0 is used for the OICR
 * in the VF space. Since VMDq and other PF VSIs are within
 * the PF function space, use the vector index that's
 * tracked for this PF.
*/
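/* A sketch of the register value written below, assuming the field
 * layout QINT_TQCTL_MSIX_INDX_S == 0 and QINT_TQCTL_ITR_INDX_S == 11
 * (the mask/shift names appear in the code; these shift values are
 * illustrative):
 *
 *   vector = 5, itr = ICE_TX_ITR (1)
 *   val    = QINT_TQCTL_CAUSE_ENA_M | (1 << 11) | 5
 *
 * which routes that Tx queue's interrupts to function-relative
 * vector 5 with ITR index 1.
 */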
for (q = 0; q < q_vector->num_ring_tx; q++) {
u32 val;
itr = ICE_TX_ITR;
val = QINT_TQCTL_CAUSE_ENA_M |
(itr << QINT_TQCTL_ITR_INDX_S) |
(vector << QINT_TQCTL_MSIX_INDX_S);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
txq++;
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
u32 val;
itr = ICE_RX_ITR;
val = QINT_RQCTL_CAUSE_ENA_M |
(itr << QINT_RQCTL_ITR_INDX_S) |
(vector << QINT_RQCTL_MSIX_INDX_S);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
rxq++;
}
}
ice_flush(hw);
}
/**
* ice_ena_misc_vector - enable the non-queue interrupts
* @pf: board private structure
*/
static void ice_ena_misc_vector(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
u32 val;
/* clear things first */
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */
val = (PFINT_OICR_HLP_RDY_M |
PFINT_OICR_CPM_RDY_M |
PFINT_OICR_ECC_ERR_M |
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_GPIO_M |
PFINT_OICR_STORM_DETECT_M |
PFINT_OICR_HMC_ERR_M);
wr32(hw, PFINT_OICR_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
/**
* ice_misc_intr - misc interrupt handler
* @irq: interrupt number
* @data: pointer to the PF structure
*/
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
struct ice_pf *pf = (struct ice_pf *)data;
struct ice_hw *hw = &pf->hw;
irqreturn_t ret = IRQ_NONE;
u32 oicr, ena_mask;
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
/* we have a reset warning */
ena_mask &= ~PFINT_OICR_GRST_M;
reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
GLGEN_RSTAT_RESET_TYPE_S;
if (reset == ICE_RESET_CORER)
pf->corer_count++;
else if (reset == ICE_RESET_GLOBR)
pf->globr_count++;
else
pf->empr_count++;
/* If a reset cycle isn't already in progress, we set a bit in
* pf->state so that the service task can start a reset/rebuild.
* We also make note of which reset happened so that peer
* devices/drivers can be informed.
*/
if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) {
if (reset == ICE_RESET_CORER)
set_bit(__ICE_CORER_RECV, pf->state);
else if (reset == ICE_RESET_GLOBR)
set_bit(__ICE_GLOBR_RECV, pf->state);
else
set_bit(__ICE_EMPR_RECV, pf->state);
set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
}
}
if (oicr & PFINT_OICR_HMC_ERR_M) {
ena_mask &= ~PFINT_OICR_HMC_ERR_M;
dev_dbg(&pf->pdev->dev,
"HMC Error interrupt - info 0x%x, data 0x%x\n",
rd32(hw, PFHMC_ERRORINFO),
rd32(hw, PFHMC_ERRORDATA));
}
/* Report and mask off any remaining unexpected interrupts */
oicr &= ena_mask;
if (oicr) {
dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
oicr);
/* If a critical error is pending there is no choice but to
* reset the device.
*/
if (oicr & (PFINT_OICR_PE_CRITERR_M |
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_ECC_ERR_M)) {
set_bit(__ICE_PFR_REQ, pf->state);
ice_service_task_schedule(pf);
}
ena_mask &= ~oicr;
}
ret = IRQ_HANDLED;
/* re-enable interrupt causes that are not handled during this pass */
wr32(hw, PFINT_OICR_ENA, ena_mask);
if (!test_bit(__ICE_DOWN, pf->state)) {
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
}
return ret;
}
/**
* ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
* @vsi: the VSI being configured
*
* This function maps descriptor rings to the queue-specific vectors allotted
* through the MSI-X enabling code. On a constrained vector budget, we map Tx
* and Rx rings to the vector as "efficiently" as possible.
*/
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
int q_vectors = vsi->num_q_vectors;
int tx_rings_rem, rx_rings_rem;
int v_id;
/* initialize the remaining ring counts to the VSI's queue counts */
tx_rings_rem = vsi->num_txq;
rx_rings_rem = vsi->num_rxq;
for (v_id = 0; v_id < q_vectors; v_id++) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
/* Tx rings mapping to vector */
tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
q_vector->tx.ring = NULL;
q_base = vsi->num_txq - tx_rings_rem;
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
struct ice_ring *tx_ring = vsi->tx_rings[q_id];
tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring;
q_vector->tx.ring = tx_ring;
}
tx_rings_rem -= tx_rings_per_v;
/* Rx rings mapping to vector */
rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
q_vector->rx.ring = NULL;
q_base = vsi->num_rxq - rx_rings_rem;
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
struct ice_ring *rx_ring = vsi->rx_rings[q_id];
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
q_vector->rx.ring = rx_ring;
}
rx_rings_rem -= rx_rings_per_v;
}
}
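/* Worked example of the DIV_ROUND_UP() distribution above: with
 * num_txq = 9 and num_q_vectors = 4, the vectors receive 3, 2, 2 and
 * 2 Tx rings respectively:
 *
 *   v0: DIV_ROUND_UP(9, 4) = 3, 6 rings remain
 *   v1: DIV_ROUND_UP(6, 3) = 2, 4 rings remain
 *   v2: DIV_ROUND_UP(4, 2) = 2, 2 rings remain
 *   v3: DIV_ROUND_UP(2, 1) = 2
 *
 * Rx rings are spread the same way, so no vector ever carries more
 * than one ring above any other.
 */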
/**
* ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
*
* Sets the queue, descriptor and vector counts on the VSI based on the
* VSI type.
*/
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
switch (vsi->type) {
case ICE_VSI_PF:
vsi->alloc_txq = pf->num_lan_tx;
vsi->alloc_rxq = pf->num_lan_rx;
vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
default:
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
break;
}
}
/**
* ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
* @vsi: VSI pointer
* @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
*
* On error: returns error code (negative)
* On success: returns 0
*/
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
{
struct ice_pf *pf = vsi->back;
/* allocate memory for both Tx and Rx ring pointers */
vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
sizeof(struct ice_ring *), GFP_KERNEL);
if (!vsi->tx_rings)
goto err_txrings;
vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
sizeof(struct ice_ring *), GFP_KERNEL);
if (!vsi->rx_rings)
goto err_rxrings;
if (alloc_qvectors) {
/* allocate memory for q_vector pointers */
vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
vsi->num_q_vectors,
sizeof(struct ice_q_vector *),
GFP_KERNEL);
if (!vsi->q_vectors)
goto err_vectors;
}
return 0;
err_vectors:
devm_kfree(&pf->pdev->dev, vsi->rx_rings);
err_rxrings:
devm_kfree(&pf->pdev->dev, vsi->tx_rings);
err_txrings:
return -ENOMEM;
}
/**
* ice_msix_clean_rings - MSIX mode Interrupt Handler
* @irq: interrupt number
* @data: pointer to a q_vector
*/
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
/**
* ice_vsi_alloc - Allocates the next available struct ice_vsi in the PF
* @pf: board private structure
* @type: type of VSI
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
{
struct ice_vsi *vsi = NULL;
/* Need to protect the allocation of the VSIs at the PF level */
mutex_lock(&pf->sw_mutex);
/* If we have already allocated our maximum number of VSIs,
 * pf->next_vsi will be ICE_NO_VSI. If not, the pf->next_vsi slot
 * is available to be populated.
*/
if (pf->next_vsi == ICE_NO_VSI) {
dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
goto unlock_pf;
}
vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
if (!vsi)
goto unlock_pf;
vsi->type = type;
vsi->back = pf;
set_bit(__ICE_DOWN, vsi->state);
vsi->idx = pf->next_vsi;
vsi->work_lmt = ICE_DFLT_IRQ_WORK;
ice_vsi_set_num_qs(vsi);
switch (vsi->type) {
case ICE_VSI_PF:
if (ice_vsi_alloc_arrays(vsi, true))
goto err_rings;
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
goto unlock_pf;
}
/* fill VSI slot in the PF struct */
pf->vsi[pf->next_vsi] = vsi;
/* prepare pf->next_vsi for next use */
pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
pf->next_vsi);
goto unlock_pf;
err_rings:
devm_kfree(&pf->pdev->dev, vsi);
vsi = NULL;
unlock_pf:
mutex_unlock(&pf->sw_mutex);
return vsi;
}
/**
* ice_free_irq_msix_misc - Unroll misc vector setup
* @pf: board private structure
*/
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
/* disable OICR interrupt */
wr32(&pf->hw, PFINT_OICR_ENA, 0);
ice_flush(&pf->hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
pf->msix_entries[pf->oicr_idx].vector, pf);
}
ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
}
/**
* ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
* @pf: board private structure
*
* This sets up the handler for MSIX 0, which is used to manage the
* non-queue interrupts, e.g. AdminQ and errors. This is not used
* when in MSI or Legacy interrupt mode.
*/
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
int oicr_idx, err = 0;
u8 itr_gran;
u32 val;
if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
dev_driver_string(&pf->pdev->dev),
dev_name(&pf->pdev->dev));
/* Do not request the IRQ, but do enable the OICR interrupt, since the
 * settings are lost during reset. Note that this function is called
 * only in the rebuild path and not while a reset is in progress.
*/
if (ice_is_reset_recovery_pending(pf->state))
goto skip_req_irq;
/* reserve one vector in irq_tracker for misc interrupts */
oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
if (oicr_idx < 0)
return oicr_idx;
pf->oicr_idx = oicr_idx;
err = devm_request_irq(&pf->pdev->dev,
pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
if (err) {
dev_err(&pf->pdev->dev,
"devm_request_irq for %s failed: %d\n",
pf->int_name, err);
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
return err;
}
skip_req_irq:
ice_ena_misc_vector(pf);
val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* This enables Admin queue Interrupt causes */
val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
itr_gran = hw->itr_gran_200;
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
ITR_TO_REG(ICE_ITR_8K, itr_gran));
ice_flush(hw);
ice_irq_dynamic_ena(hw, NULL, NULL);
return 0;
}
/**
* ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @vsi: the VSI getting queues
*
* Return 0 on success and a negative value on error
*/
static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int offset, ret = 0;
mutex_lock(&pf->avail_q_mutex);
/* look for contiguous block of queues for tx */
offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
0, vsi->alloc_txq, 0);
if (offset < ICE_MAX_TXQS) {
int i;
bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
for (i = 0; i < vsi->alloc_txq; i++)
vsi->txq_map[i] = i + offset;
} else {
ret = -ENOMEM;
vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
}
/* look for contiguous block of queues for rx */
offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
0, vsi->alloc_rxq, 0);
if (offset < ICE_MAX_RXQS) {
int i;
bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
for (i = 0; i < vsi->alloc_rxq; i++)
vsi->rxq_map[i] = i + offset;
} else {
ret = -ENOMEM;
vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
}
mutex_unlock(&pf->avail_q_mutex);
return ret;
}
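/* Example of the contiguous assignment above (values illustrative):
 * if pf->avail_txqs already has bits 0-3 set and vsi->alloc_txq is 4,
 * bitmap_find_next_zero_area() returns offset 4, bits 4-7 get set, and
 * vsi->txq_map becomes { 4, 5, 6, 7 }. If no run of alloc_txq zero
 * bits exists, the Tx mapping mode falls back to scatter instead.
 */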
/**
* ice_vsi_get_qs_scatter - Assign scattered queues to a VSI
* @vsi: the VSI getting queues
*
* Return 0 on success and a negative value on error
*/
static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int i, index = 0;
mutex_lock(&pf->avail_q_mutex);
if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
for (i = 0; i < vsi->alloc_txq; i++) {
index = find_next_zero_bit(pf->avail_txqs,
ICE_MAX_TXQS, index);
if (index < ICE_MAX_TXQS) {
set_bit(index, pf->avail_txqs);
vsi->txq_map[i] = index;
} else {
goto err_scatter_tx;
}
}
}
if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
for (i = 0; i < vsi->alloc_rxq; i++) {
index = find_next_zero_bit(pf->avail_rxqs,
ICE_MAX_RXQS, index);
if (index < ICE_MAX_RXQS) {
set_bit(index, pf->avail_rxqs);
vsi->rxq_map[i] = index;
} else {
goto err_scatter_rx;
}
}
}
mutex_unlock(&pf->avail_q_mutex);
return 0;
err_scatter_rx:
/* unflag any queues we have grabbed (i is the failed position) */
for (index = 0; index < i; index++) {
clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
vsi->rxq_map[index] = 0;
}
i = vsi->alloc_txq;
err_scatter_tx:
/* i is either the position of the failed attempt or vsi->alloc_txq */
for (index = 0; index < i; index++) {
clear_bit(vsi->txq_map[index], pf->avail_txqs);
vsi->txq_map[index] = 0;
}
mutex_unlock(&pf->avail_q_mutex);
return -ENOMEM;
}
/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
* Returns 0 on success and a negative value on error
*/
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
int ret = 0;
vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
/* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
* modes individually to scatter if assigning contiguous queues
* to rx or tx fails
*/
ret = ice_vsi_get_qs_contig(vsi);
if (ret < 0) {
if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
ICE_MAX_SCATTER_TXQS);
if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
ICE_MAX_SCATTER_RXQS);
ret = ice_vsi_get_qs_scatter(vsi);
}
return ret;
}
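/* Fallback flow (illustrative): if the contiguous search fails for Tx,
 * tx_mapping_mode has already been set to ICE_VSI_MAP_SCATTER,
 * alloc_txq is re-evaluated against ICE_MAX_SCATTER_TXQS, and
 * ice_vsi_get_qs_scatter() then claims individual free bits from
 * pf->avail_txqs one queue at a time.
 */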
/**
* ice_vsi_put_qs - Release queues from VSI to PF
* @vsi: the VSI that's going to release queues
*/
static void ice_vsi_put_qs(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int i;
mutex_lock(&pf->avail_q_mutex);
for (i = 0; i < vsi->alloc_txq; i++) {
clear_bit(vsi->txq_map[i], pf->avail_txqs);
vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
}
for (i = 0; i < vsi->alloc_rxq; i++) {
clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
}
mutex_unlock(&pf->avail_q_mutex);
}
/**
* ice_free_q_vector - Free memory allocated for a specific interrupt vector
* @vsi: VSI having the memory freed
* @v_idx: index of the vector to be freed
*/
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_q_vector *q_vector;
struct ice_ring *ring;
if (!vsi->q_vectors[v_idx]) {
dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
v_idx);
return;
}
q_vector = vsi->q_vectors[v_idx];
ice_for_each_ring(ring, q_vector->tx)
ring->q_vector = NULL;
ice_for_each_ring(ring, q_vector->rx)
ring->q_vector = NULL;
/* only a VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
netif_napi_del(&q_vector->napi);
devm_kfree(&vsi->back->pdev->dev, q_vector);
vsi->q_vectors[v_idx] = NULL;
}
/**
* ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
* @vsi: the VSI having memory freed
*/
static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
int v_idx;
for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
ice_free_q_vector(vsi, v_idx);
}
/**
* ice_cfg_netdev - Set up the netdev flags
* @vsi: the VSI being configured
*
* Returns 0 on success, negative value on failure
*/
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
netdev_features_t csumo_features;
netdev_features_t vlano_features;
netdev_features_t dflt_features;
netdev_features_t tso_features;
struct ice_netdev_priv *np;
struct net_device *netdev;
u8 mac_addr[ETH_ALEN];
netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
vsi->alloc_txq, vsi->alloc_rxq);
if (!netdev)
return -ENOMEM;
vsi->netdev = netdev;
np = netdev_priv(netdev);
np->vsi = vsi;
dflt_features = NETIF_F_SG |
NETIF_F_HIGHDMA |
NETIF_F_RXHASH;
csumo_features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM;
vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
tso_features = NETIF_F_TSO;
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
vlano_features | tso_features;
/* enable features */
netdev->features |= netdev->hw_features;
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
tso_features;
netdev->vlan_features |= dflt_features | csumo_features |
tso_features;
if (vsi->type == ICE_VSI_PF) {
SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
}
netdev->priv_flags |= IFF_UNICAST_FLT;
/* assign netdev_ops */
netdev->netdev_ops = &ice_netdev_ops;
/* setup watchdog timeout value to be 5 seconds */
netdev->watchdog_timeo = 5 * HZ;
ice_set_ethtool_ops(netdev);
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = ICE_MAX_MTU;
return 0;
}
/**
 * ice_vsi_free_arrays - clean up VSI resources
* @vsi: pointer to VSI being cleared
* @free_qvectors: bool to specify if q_vectors should be deallocated
*/
static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
{
struct ice_pf *pf = vsi->back;
/* free the ring and vector containers */
if (free_qvectors && vsi->q_vectors) {
devm_kfree(&pf->pdev->dev, vsi->q_vectors);
vsi->q_vectors = NULL;
}
if (vsi->tx_rings) {
devm_kfree(&pf->pdev->dev, vsi->tx_rings);
vsi->tx_rings = NULL;
}
if (vsi->rx_rings) {
devm_kfree(&pf->pdev->dev, vsi->rx_rings);
vsi->rx_rings = NULL;
}
}
/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
*
* Returns 0 on success, negative on failure
*/
static int ice_vsi_clear(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
if (!vsi)
return 0;
if (!vsi->back)
return -EINVAL;
pf = vsi->back;
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
vsi->idx);
return -EINVAL;
}
mutex_lock(&pf->sw_mutex);
/* updates the PF for this cleared vsi */
pf->vsi[vsi->idx] = NULL;
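/* remember the lowest freed index as the next allocation hint */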
if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi, true);
mutex_unlock(&pf->sw_mutex);
devm_kfree(&pf->pdev->dev, vsi);
return 0;
}
/**
* ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
/* allocate q_vector */
q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
NAPI_POLL_WEIGHT);
/* tie q_vector and vsi together */
vsi->q_vectors[v_idx] = q_vector;
return 0;
}
/**
* ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
*/
static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int v_idx = 0, num_q_vectors;
int err;
if (vsi->q_vectors[0]) {
dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
vsi->vsi_num);
return -EEXIST;
}
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
num_q_vectors = vsi->num_q_vectors;
} else {
err = -EINVAL;
goto err_out;
}
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
err = ice_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
}
return 0;
err_out:
while (v_idx--)
ice_free_q_vector(vsi, v_idx);
dev_err(&pf->pdev->dev,
"Failed to allocate %d q_vectors for VSI %d, ret=%d\n",
vsi->num_q_vectors, vsi->vsi_num, err);
vsi->num_q_vectors = 0;
return err;
}
/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
* This should only be called after ice_vsi_alloc() which allocates the
* corresponding SW VSI structure and initializes num_queue_pairs for the
* newly allocated VSI.
*
* Returns 0 on success or negative on failure
*/
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int num_q_vectors = 0;
if (vsi->base_vector) {
dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
vsi->vsi_num, vsi->base_vector);
return -EEXIST;
}
if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
return -ENOENT;
switch (vsi->type) {
case ICE_VSI_PF:
num_q_vectors = vsi->num_q_vectors;
break;
default:
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
break;
}
if (num_q_vectors)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_err(&pf->pdev->dev,
"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->base_vector);
return -ENOENT;
}
return 0;
}
/**
* ice_fill_rss_lut - Fill the RSS lookup table with default values
* @lut: Lookup table
* @rss_table_size: Lookup table size
* @rss_size: Range of queue number for hashing
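 *
 * e.g. with rss_table_size = 8 and rss_size = 4 the LUT becomes
 * { 0, 1, 2, 3, 0, 1, 2, 3 }, spreading hash buckets round-robin
 * across the first rss_size queues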
*/
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
u16 i;
for (i = 0; i < rss_table_size; i++)
lut[i] = i % rss_size;
}
/**
* ice_vsi_cfg_rss - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
static int ice_vsi_cfg_rss(struct ice_vsi *vsi)
{
u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
struct ice_aqc_get_set_rss_keys *key;
struct ice_pf *pf = vsi->back;
enum ice_status status;
int err = 0;
u8 *lut;
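/* never hash over more queues than the VSI actually has */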
vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
if (vsi->rss_lut_user)
memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
else
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type,
lut, vsi->rss_table_size);
if (status) {
dev_err(&vsi->back->pdev->dev,
"set_rss_lut failed, error %d\n", status);
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto ice_vsi_cfg_rss_exit;
}
if (vsi->rss_hkey_user)
memcpy(seed, vsi->rss_hkey_user,
ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
else
netdev_rss_key_fill((void *)seed,
ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
memcpy(&key->standard_rss_key, seed,
ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key);
if (status) {
dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
status);
err = -EIO;
}
devm_kfree(&pf->pdev->dev, key);
ice_vsi_cfg_rss_exit:
devm_kfree(&pf->pdev->dev, lut);
return err;
}
/**
 * ice_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This reallocates the VSI's queue resources
*
* Returns 0 on success and negative value on failure
*/
static int ice_vsi_reinit_setup(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
int ret, i;
if (!vsi)
return -EINVAL;
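/* release existing q_vectors, interrupt vectors and rings so the
 * VSI can be rebuilt from a clean slate
 */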
ice_vsi_free_q_vectors(vsi);
ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
vsi->base_vector = 0;
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi, false);
ice_vsi_set_num_qs(vsi);
/* Initialize VSI struct elements and create VSI in FW */
ret = ice_vsi_add(vsi);
if (ret < 0)
goto err_vsi;
ret = ice_vsi_alloc_arrays(vsi, false);
if (ret < 0)
goto err_vsi;
switch (vsi->type) {
case ICE_VSI_PF:
if (!vsi->netdev) {
ret = ice_cfg_netdev(vsi);
if (ret)
goto err_rings;
ret = register_netdev(vsi->netdev);
if (ret)
goto err_rings;
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
}
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
goto err_rings;
ret = ice_vsi_setup_vector_base(vsi);
if (ret)
goto err_vectors;
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto err_vectors;
ice_vsi_map_rings_to_vectors(vsi);
break;
default:
break;
}
ice_vsi_set_tc_cfg(vsi);
/* configure VSI nodes based on number of queues and TCs */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
vsi->tc_cfg.ena_tc, max_txqs);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"Failed VSI lan queue config\n");
goto err_vectors;
}
return 0;
err_vectors:
ice_vsi_free_q_vectors(vsi);
err_rings:
if (vsi->netdev) {
vsi->current_netdev_flags = 0;
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
err_vsi:
ice_vsi_clear(vsi);
set_bit(__ICE_RESET_FAILED, vsi->back->state);
return ret;
}
/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
* @pi: pointer to the port_info instance
*
* This allocates the sw VSI structure and its queue resources.
*
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL.
*/
static struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type,
struct ice_port_info *pi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = &pf->pdev->dev;
struct ice_vsi_ctx ctxt = { 0 };
struct ice_vsi *vsi;
int ret, i;
vsi = ice_vsi_alloc(pf, type);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
return NULL;
}
vsi->port_info = pi;
vsi->vsw = pf->first_sw;
if (ice_vsi_get_qs(vsi)) {
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
vsi->idx);
goto err_get_qs;
}
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
/* create the VSI */
ret = ice_vsi_add(vsi);
if (ret)
goto err_vsi;
ctxt.vsi_num = vsi->vsi_num;
switch (vsi->type) {
case ICE_VSI_PF:
ret = ice_cfg_netdev(vsi);
if (ret)
goto err_cfg_netdev;
ret = register_netdev(vsi->netdev);
if (ret)
goto err_register_netdev;
netif_carrier_off(vsi->netdev);
/* make sure transmit queues start off as stopped */
netif_tx_stop_all_queues(vsi->netdev);
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
goto err_msix;
ret = ice_vsi_setup_vector_base(vsi);
if (ret)
goto err_rings;
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto err_rings;
ice_vsi_map_rings_to_vectors(vsi);
/* Do not exit if configuring RSS had an issue; at least traffic
* can still be received on the first queue. Hence there is no
* need to capture the return value
*/
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss(vsi);
break;
default:
/* if vsi type is not recognized, clean up the resources and
* exit
*/
goto err_rings;
}
ice_vsi_set_tc_cfg(vsi);
/* configure VSI nodes based on number of queues and TCs */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
vsi->tc_cfg.ena_tc, max_txqs);
if (ret) {
dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
goto err_rings;
}
return vsi;
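/* error paths below unwind the setup steps above in reverse order */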
err_rings:
ice_vsi_free_q_vectors(vsi);
err_msix:
if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(vsi->netdev);
err_register_netdev:
if (vsi->netdev) {
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
err_cfg_netdev:
ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
if (ret)
dev_err(&vsi->back->pdev->dev,
"Free VSI AQ call failed, err %d\n", ret);
err_vsi:
ice_vsi_put_qs(vsi);
err_get_qs:
pf->q_left_tx += vsi->alloc_txq;
pf->q_left_rx += vsi->alloc_rxq;
ice_vsi_clear(vsi);
return NULL;
}
/**
 * ice_vsi_add_vlan - Add VSI membership for a given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
*/
static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
int err = 0;
tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
tmp->fltr_info.flag = ICE_FLTR_TX;
tmp->fltr_info.src = vsi->vsi_num;
tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
tmp->fltr_info.l_data.vlan.vlan_id = vid;
INIT_LIST_HEAD(&tmp->list_entry);
list_add(&tmp->list_entry, &tmp_add_list);
status = ice_add_vlan(&pf->hw, &tmp_add_list);
if (status) {
err = -ENODEV;
dev_err(&pf->pdev->dev, "Failure adding VLAN %d on VSI %i\n",
vid, vsi->vsi_num);
}
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return err;
}
/**
 * ice_vlan_rx_add_vid - Add a VLAN id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: VLAN id to be added
 *
 * net_device_ops implementation for adding VLAN ids
*/
static int ice_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret = 0;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
vid, VLAN_N_VID);
return -EINVAL;
}
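/* per-VLAN filtering is not supported while a port VLAN is set */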
if (vsi->info.pvid)
return -EINVAL;
/* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
* needed to continue allowing all untagged packets since the VLAN
* prune list is applied to all packets by the switch
*/
ret = ice_vsi_add_vlan(vsi, vid);
if (!ret)
set_bit(vid, vsi->active_vlans);
return ret;
}
/**
* ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
* @vsi: the VSI being configured
* @vid: VLAN id to be removed
*/
static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
if (!list)
return;
list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
list->fltr_info.l_data.vlan.vlan_id = vid;
list->fltr_info.flag = ICE_FLTR_TX;
list->fltr_info.src = vsi->vsi_num;
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
if (ice_remove_vlan(&pf->hw, &tmp_add_list))
dev_err(&pf->pdev->dev, "Error removing VLAN %d on VSI %i\n",
vid, vsi->vsi_num);
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
}
/**
 * ice_vlan_rx_kill_vid - Remove a VLAN id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: VLAN id to be removed
 *
 * net_device_ops implementation for removing VLAN ids
*/
static int ice_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
if (vsi->info.pvid)
return -EINVAL;
/* return code is ignored as there is nothing a user can do
* about a failure to remove, and ice_vsi_kill_vlan has already
* logged the error
*/
ice_vsi_kill_vlan(vsi, vid);
clear_bit(vid, vsi->active_vlans);
return 0;
}
/**
* ice_setup_pf_sw - Setup the HW switch on startup or after reset
* @pf: board private structure
*
* Returns 0 on success, negative value on failure
*/
static int ice_setup_pf_sw(struct ice_pf *pf)
{
LIST_HEAD(tmp_add_list);
u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
int status = 0;
if (!ice_is_reset_recovery_pending(pf->state)) {
vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);
if (!vsi) {
status = -ENOMEM;
goto error_exit;
}
} else {
vsi = pf->vsi[0];
status = ice_vsi_reinit_setup(vsi);
if (status < 0)
return -EIO;
}
/* tmp_add_list contains a list of MAC addresses for which MAC
* filters need to be programmed. Add the VSI's unicast MAC to
* this list
*/
status = ice_add_mac_to_list(vsi, &tmp_add_list,
vsi->port_info->mac.perm_addr);
if (status)
goto error_exit;
/* VSI needs to receive broadcast traffic, so add the broadcast
* MAC address to the list.
*/
eth_broadcast_addr(broadcast);
status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
if (status)
goto error_exit;
/* program MAC filters for entries in tmp_add_list */
status = ice_add_mac(&pf->hw, &tmp_add_list);
if (status) {
dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
status = -ENOMEM;
goto error_exit;
}
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return status;
error_exit:
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
if (vsi) {
ice_vsi_free_q_vectors(vsi);
if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(vsi->netdev);
if (vsi->netdev) {
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
ice_vsi_delete(vsi);
ice_vsi_put_qs(vsi);
pf->q_left_tx += vsi->alloc_txq;
pf->q_left_rx += vsi->alloc_rxq;
ice_vsi_clear(vsi);
}
return status;
}
/**
* ice_determine_q_usage - Calculate queue distribution
* @pf: board private structure
 *
 * Distribute the available Tx/Rx queues for LAN use, bounded by the
 * number of online CPUs, and record what remains in pf->q_left_tx/rx
*/
static void ice_determine_q_usage(struct ice_pf *pf)
{
u16 q_left_tx, q_left_rx;
q_left_tx = pf->hw.func_caps.common_cap.num_txq;
q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
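/* at most one LAN Tx queue per online CPU */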
pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
/* only 1 rx queue unless RSS is enabled */
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
pf->num_lan_rx = 1;
else
pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
pf->q_left_tx = q_left_tx - pf->num_lan_tx;
pf->q_left_rx = q_left_rx - pf->num_lan_rx;
}
/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
* @pf: board private structure to initialize
*/
static void ice_deinit_pf(struct ice_pf *pf)
{
if (pf->serv_tmr.function)
del_timer_sync(&pf->serv_tmr);
if (pf->serv_task.func)
cancel_work_sync(&pf->serv_task);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->avail_q_mutex);
}
/**
* ice_init_pf - Initialize general software structures (struct ice_pf)
* @pf: board private structure to initialize
*/
static void ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
/* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
mutex_lock(&pf->avail_q_mutex);
bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
mutex_unlock(&pf->avail_q_mutex);
if (pf->hw.func_caps.common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
pf->serv_tmr_period = HZ;
INIT_WORK(&pf->serv_task, ice_service_task);
clear_bit(__ICE_SERVICE_SCHED, pf->state);
}
/**
 * ice_ena_msix_range - Request a range of MSI-X vectors from the OS
 * @pf: board private structure
 *
 * Compute the number of MSI-X vectors required (v_budget) and request that
 * many from the OS. Returns the number of vectors reserved, or negative on
 * failure
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
int v_left, v_actual, v_budget = 0;
int needed, err, i;
v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
/* reserve one vector for miscellaneous handler */
needed = 1;
v_budget += needed;
v_left -= needed;
/* reserve vectors for LAN traffic */
pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
v_budget += pf->num_lan_msix;
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (!pf->msix_entries) {
err = -ENOMEM;
goto exit_err;
}
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
/* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
ICE_MIN_MSIX, v_budget);
if (v_actual < 0) {
dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
goto msix_err;
}
if (v_actual < v_budget) {
dev_warn(&pf->pdev->dev,
"not enough vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
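		/* degrade gracefully: keep the full LAN allocation if the
		 * misc vector still fits, else fall back to a single LAN
		 * vector; fewer than two vectors cannot work at all
		 */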
if (v_actual >= (pf->num_lan_msix + 1)) {
pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
} else if (v_actual >= 2) {
pf->num_lan_msix = 1;
pf->num_avail_msix = v_actual - 2;
} else {
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
}
}
return v_actual;
msix_err:
devm_kfree(&pf->pdev->dev, pf->msix_entries);
goto exit_err;
exit_err:
pf->num_lan_msix = 0;
clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
return err;
}
/**
* ice_dis_msix - Disable MSI-X interrupt setup in OS
* @pf: board private structure
*/
static void ice_dis_msix(struct ice_pf *pf)
{
pci_disable_msix(pf->pdev);
devm_kfree(&pf->pdev->dev, pf->msix_entries);
pf->msix_entries = NULL;
clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}
/**
* ice_init_interrupt_scheme - Determine proper interrupt scheme
* @pf: board private structure to initialize
*/
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int vectors = 0;
ssize_t size;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
vectors = ice_ena_msix_range(pf);
else
return -ENODEV;
if (vectors < 0)
return vectors;
/* set up vector assignment tracking */
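	/* the tracker is a single allocation: the struct itself followed by
	 * a u16 list entry for each reserved vector
	 */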
size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
if (!pf->irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
}
pf->irq_tracker->num_entries = vectors;
return 0;
}
/**
* ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
* @pf: board private structure
*/
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_dis_msix(pf);
devm_kfree(&pf->pdev->dev, pf->irq_tracker);
pf->irq_tracker = NULL;
}
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
*
* Returns 0 on success, negative on failure
*/
static int ice_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
struct ice_pf *pf;
struct ice_hw *hw;
int err;
/* this driver uses devres, see Documentation/driver-model/devres.txt */
err = pcim_enable_device(pdev);
if (err)
return err;
err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
if (err) {
dev_err(&pdev->dev, "I/O map error %d\n", err);
return err;
}
pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
if (!pf)
return -ENOMEM;
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
return err;
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pf->pdev = pdev;
pci_set_drvdata(pdev, pf);
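	/* start out administratively down; this bit is cleared once probe
	 * has finished bringing the device up
	 */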
set_bit(__ICE_DOWN, pf->state);
hw = &pf->hw;
hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
hw->back = pf;
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
ice_set_ctrlq_len(hw);
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
#ifndef CONFIG_DYNAMIC_DEBUG
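	/* a debug module parameter below -1 carries a hardware debug_mask
	 * (the 0x8XXXXXXX form described in the parameter help text)
	 */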
if (debug < -1)
hw->debug_mask = debug;
#endif
err = ice_init_hw(hw);
if (err) {
dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
err = -EIO;
goto err_exit_unroll;
}
dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
ice_init_pf(pf);
ice_determine_q_usage(pf);
pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
hw->func_caps.guaranteed_num_vsi);
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
}
pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
sizeof(struct ice_vsi *), GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
goto err_init_pf_unroll;
}
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(&pdev->dev,
"ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
goto err_init_interrupt_unroll;
}
	/* In case of MSI-X we are going to set up the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets set up at open.
	 */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(&pdev->dev,
"setup of misc vector failed: %d\n", err);
goto err_init_interrupt_unroll;
}
}
/* create switch struct for the switch element created by FW on boot */
pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
GFP_KERNEL);
if (!pf->first_sw) {
err = -ENOMEM;
goto err_msix_misc_unroll;
}
pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
pf->first_sw->pf = pf;
/* record the sw_id available for later use */
pf->first_sw->sw_id = hw->port_info->sw_id;
err = ice_setup_pf_sw(pf);
if (err) {
dev_err(&pdev->dev,
"probe failed due to setup pf switch:%d\n", err);
goto err_alloc_sw_unroll;
}
/* Driver is mostly up */
clear_bit(__ICE_DOWN, pf->state);
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
goto err_alloc_sw_unroll;
}
return 0;
err_alloc_sw_unroll:
set_bit(__ICE_DOWN, pf->state);
devm_kfree(&pf->pdev->dev, pf->first_sw);
err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
devm_kfree(&pdev->dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
ice_deinit_hw(hw);
err_exit_unroll:
pci_disable_pcie_error_reporting(pdev);
return err;
}
/**
* ice_remove - Device removal routine
* @pdev: PCI device information struct
*/
static void ice_remove(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
int i = 0;
int err;
if (!pf)
return;
set_bit(__ICE_DOWN, pf->state);
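	/* release every VSI that is still allocated before tearing down the
	 * interrupt scheme and software structures
	 */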
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (!pf->vsi[i])
continue;
err = ice_vsi_release(pf->vsi[i]);
if (err)
dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n",
i, err);
}
ice_free_irq_msix_misc(pf);
ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
pci_disable_pcie_error_reporting(pdev);
}
/* ice_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
* Last entry must be all 0s
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
/* required last entry */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
static struct pci_driver ice_driver = {
.name = KBUILD_MODNAME,
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
};
/**
* ice_module_init - Driver registration routine
*
* ice_module_init is the first routine called when the driver is
 * loaded. It creates the driver workqueue and registers with the PCI
 * subsystem.
*/
static int __init ice_module_init(void)
{
int status;
pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
pr_info("%s\n", ice_copyright);
ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
}
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register pci driver, err %d\n", status);
destroy_workqueue(ice_wq);
}
return status;
}
module_init(ice_module_init);
/**
* ice_module_exit - Driver exit cleanup routine
*
* ice_module_exit is called just before the driver is removed
* from memory.
*/
static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
destroy_workqueue(ice_wq);
pr_info("module unloaded\n");
}
module_exit(ice_module_exit);
/**
* ice_set_mac_address - NDO callback to set mac address
* @netdev: network interface device structure
* @pi: pointer to an address structure
*
* Returns 0 on success, negative on failure
*/
static int ice_set_mac_address(struct net_device *netdev, void *pi)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct sockaddr *addr = pi;
enum ice_status status;
LIST_HEAD(a_mac_list);
LIST_HEAD(r_mac_list);
u8 flags = 0;
int err;
u8 *mac;
mac = (u8 *)addr->sa_data;
if (!is_valid_ether_addr(mac))
return -EADDRNOTAVAIL;
if (ether_addr_equal(netdev->dev_addr, mac)) {
netdev_warn(netdev, "already using mac %pM\n", mac);
return 0;
}
if (test_bit(__ICE_DOWN, pf->state) ||
ice_is_reset_recovery_pending(pf->state)) {
netdev_err(netdev, "can't set mac %pM. device not ready\n",
mac);
return -EBUSY;
}
/* When we change the mac address we also have to change the mac address
* based filter rules that were created previously for the old mac
* address. So first, we remove the old filter rule using ice_remove_mac
* and then create a new filter rule using ice_add_mac. Note that for
* both these operations, we first need to form a "list" of mac
* addresses (even though in this case, we have only 1 mac address to be
 * added/removed) and this is done using ice_add_mac_to_list. Depending on
* the ensuing operation this "list" of mac addresses is either to be
* added or removed from the filter.
*/
err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
if (err) {
err = -EADDRNOTAVAIL;
goto free_lists;
}
status = ice_remove_mac(hw, &r_mac_list);
if (status) {
err = -EADDRNOTAVAIL;
goto free_lists;
}
err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
if (err) {
err = -EADDRNOTAVAIL;
goto free_lists;
}
status = ice_add_mac(hw, &a_mac_list);
if (status) {
err = -EADDRNOTAVAIL;
goto free_lists;
}
free_lists:
/* free list entries */
ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
if (err) {
netdev_err(netdev, "can't set mac %pM. filter update failed\n",
mac);
return err;
}
/* change the netdev's mac address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
netdev->dev_addr);
/* write new mac address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
mac);
}
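	/* a failed firmware write is logged but not treated as fatal here,
	 * since the switch filter update above already took effect
	 */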
return 0;
}
/**
* ice_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
*/
static void ice_set_rx_mode(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
if (!vsi)
return;
/* Set the flags to synchronize filters
* ndo_set_rx_mode may be triggered even without a change in netdev
* flags
*/
set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
/* schedule our worker thread which will take care of
* applying the new filter changes
*/
ice_service_task_schedule(vsi->back);
}
/**
* ice_fdb_add - add an entry to the hardware database
* @ndm: the input from the stack
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN id
* @flags: instructions from stack about fdb operation
*/
static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
struct net_device *dev, const unsigned char *addr,
u16 vid, u16 flags)
{
int err;
if (vid) {
netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
return -EINVAL;
}
if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
netdev_err(dev, "FDB only supports static addresses\n");
return -EINVAL;
}
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
err = dev_uc_add_excl(dev, addr);
else if (is_multicast_ether_addr(addr))
err = dev_mc_add_excl(dev, addr);
else
err = -EINVAL;
/* Only return duplicate errors if NLM_F_EXCL is set */
if (err == -EEXIST && !(flags & NLM_F_EXCL))
err = 0;
return err;
}
/**
* ice_fdb_del - delete an entry from the hardware database
* @ndm: the input from the stack
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
 * @addr: the MAC address entry being removed
* @vid: VLAN id
*/
static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
__always_unused u16 vid)
{
int err;
if (ndm->ndm_state & NUD_PERMANENT) {
netdev_err(dev, "FDB only supports static addresses\n");
return -EINVAL;
}
if (is_unicast_ether_addr(addr))
err = dev_uc_del(dev, addr);
else if (is_multicast_ether_addr(addr))
err = dev_mc_del(dev, addr);
else
err = -EINVAL;
return err;
}
/**
* ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
* @vsi: the vsi being changed
*/
static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx ctxt = { 0 };
enum ice_status status;
/* Here we are configuring the VSI to let the driver add VLAN tags by
* setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
* tag insertion happens in the Tx hot path, in ice_tx_map.
*/
ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt.vsi_num = vsi->vsi_num;
status = ice_aq_update_vsi(hw, &ctxt, NULL);
if (status) {
dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
return -EIO;
}
vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
return 0;
}
/**
* ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
* @vsi: the vsi being changed
 * @ena: boolean value indicating if this is an enable or disable request
*/
static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx ctxt = { 0 };
enum ice_status status;
/* Here we are configuring what the VSI should do with the VLAN tag in
* the Rx packet. We can either leave the tag in the packet or put it in
* the Rx descriptor.
*/
if (ena) {
/* Strip VLAN tag from Rx packet and put it in the desc */
ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
} else {
/* Disable stripping. Leave tag in packet */
ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
}
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt.vsi_num = vsi->vsi_num;
status = ice_aq_update_vsi(hw, &ctxt, NULL);
if (status) {
dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
return -EIO;
}
vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
return 0;
}
/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
*/
static int ice_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret = 0;
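	/* act only on the features that are actually toggling relative to
	 * the netdev's current feature set
	 */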
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
ret = ice_vsi_manage_vlan_stripping(vsi, true);
else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
ret = ice_vsi_manage_vlan_stripping(vsi, false);
else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
!(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
ret = ice_vsi_manage_vlan_insertion(vsi);
else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
ret = ice_vsi_manage_vlan_insertion(vsi);
return ret;
}
/**
* ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
* @vsi: VSI to setup vlan properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
int ret = 0;
if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
ret = ice_vsi_manage_vlan_stripping(vsi, true);
if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
ret = ice_vsi_manage_vlan_insertion(vsi);
return ret;
}
/**
* ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
* @vsi: the VSI being brought back up
*/
static int ice_restore_vlan(struct ice_vsi *vsi)
{
int err;
u16 vid;
if (!vsi->netdev)
return -EINVAL;
err = ice_vsi_vlan_setup(vsi);
if (err)
return err;
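	/* re-add each VLAN id that was active before the VSI went down */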
for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
if (err)
break;
}
return err;
}
/**
* ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
* @ring: The Tx ring to configure
* @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
* @pf_q: queue index in the PF space
*
* Configure the Tx descriptor ring in TLAN context.
*/
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
struct ice_vsi *vsi = ring->vsi;
struct ice_hw *hw = &vsi->back->hw;
tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
tlan_ctx->port_num = vsi->port_info->lport;
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;
/* PF number */
tlan_ctx->pf_num = hw->pf_id;
/* queue belongs to a specific VSI type
* VF / VM index should be programmed per vmvf_type setting:
* for vmvf_type = VF, it is VF number between 0-256
* for vmvf_type = VM, it is VM number between 0-767
* for PF or EMP this field should be set to zero
*/
switch (vsi->type) {
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
default:
return;
}
/* make sure the context is associated with the right VSI */
tlan_ctx->src_vsi = vsi->vsi_num;
tlan_ctx->tso_ena = ICE_TX_LEGACY;
tlan_ctx->tso_qnum = pf_q;
/* Legacy or Advanced Host Interface:
* 0: Advanced Host Interface
* 1: Legacy Host Interface
*/
tlan_ctx->legacy_int = ICE_TX_LEGACY;
}
/**
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
enum ice_status status;
u16 buf_len, i, pf_q;
int err = 0, tc = 0;
u8 num_q_grps;
buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
err = -EINVAL;
goto err_cfg_txqs;
}
qg_buf->num_txqs = 1;
num_q_grps = 1;
/* set up and configure the tx queues */
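	/* each AQ call below adds one queue group containing a single queue;
	 * the same qg_buf is reused for every ring in the loop
	 */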
ice_for_each_txq(vsi, i) {
struct ice_tlan_ctx tlan_ctx = { 0 };
pf_q = vsi->txq_map[i];
ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
ice_tlan_ctx_info);
		/* init queue specific tail reg. It is referred to as the
		 * transmit comm scheduler queue doorbell.
		 */
vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
num_q_grps, qg_buf, buf_len, NULL);
if (status) {
dev_err(&vsi->back->pdev->dev,
"Failed to set LAN Tx queue context, error: %d\n",
status);
err = -ENODEV;
goto err_cfg_txqs;
}
		/* Add Tx Queue TEID into the VSI Tx ring from the response.
		 * This will complete configuring and enabling the queue.
*/
txq = &qg_buf->txqs[0];
if (pf_q == le16_to_cpu(txq->txq_id))
vsi->tx_rings[i]->txq_teid =
le32_to_cpu(txq->q_teid);
}
err_cfg_txqs:
devm_kfree(&pf->pdev->dev, qg_buf);
return err;
}
/**
* ice_setup_rx_ctx - Configure a receive ring context
* @ring: The Rx ring to configure
*
* Configure the Rx descriptor ring in RLAN context.
*/
static int ice_setup_rx_ctx(struct ice_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
struct ice_hw *hw = &vsi->back->hw;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
u32 regval;
u16 pf_q;
int err;
	/* find the Rx queue number in the global space of 2K Rx queues */
pf_q = vsi->rxq_map[ring->q_index];
/* clear the context structure first */
memset(&rlan_ctx, 0, sizeof(rlan_ctx));
rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
rlan_ctx.qlen = ring->count;
/* Receive Packet Data Buffer Size.
* The Packet Data Buffer Size is defined in 128 byte units.
*/
rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
/* use 32 byte descriptors */
rlan_ctx.dsize = 1;
/* Strip the Ethernet CRC bytes before the packet is posted to host
* memory.
*/
rlan_ctx.crcstrip = 1;
/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
rlan_ctx.l2tsel = 1;
rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
/* This controls whether VLAN is stripped from inner headers
* The VLAN in the inner L2 header is stripped to the receive
* descriptor if enabled by this flag.
*/
rlan_ctx.showiv = 0;
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
*/
regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M;
	/* increase context priority to pick up profile id; default is 0x01;
	 * setting it to 0x03 ensures the profile is programmed even if the
	 * previous context was of the same priority
*/
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
dev_err(&vsi->back->pdev->dev,
"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
/* init queue specific tail register */
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
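	/* prime the ring with Rx buffers so the hardware has descriptors to
	 * DMA into as soon as the queue is enabled
	 */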
ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
return 0;
}
/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
*
* Return 0 on success and a negative value on error
* Configure the Rx VSI for operation.
*/
static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
int err = 0;
u16 i;
if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
vsi->max_frame = vsi->netdev->mtu +
ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
else
vsi->max_frame = ICE_RXBUF_2048;
vsi->rx_buf_len = ICE_RXBUF_2048;
/* set up individual rings */
for (i = 0; i < vsi->num_rxq && !err; i++)
err = ice_setup_rx_ctx(vsi->rx_rings[i]);
if (err) {
dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
return -EIO;
}
return err;
}
/**
* ice_vsi_cfg - Setup the VSI
* @vsi: the VSI being configured
*
* Return 0 on success and negative value on error
*/
static int ice_vsi_cfg(struct ice_vsi *vsi)
{
int err;
ice_set_rx_mode(vsi->netdev);
err = ice_restore_vlan(vsi);
if (err)
return err;
err = ice_vsi_cfg_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
return err;
}
/**
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
*/
static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
u32 *q_teids, val;
u16 *q_ids, i;
int err = 0;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
GFP_KERNEL);
if (!q_teids)
return -ENOMEM;
q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
GFP_KERNEL);
if (!q_ids) {
err = -ENOMEM;
goto err_alloc_q_ids;
}
/* set up the tx queue list to be disabled */
ice_for_each_txq(vsi, i) {
u16 v_idx;
if (!vsi->tx_rings || !vsi->tx_rings[i]) {
err = -EINVAL;
goto err_out;
}
q_ids[i] = vsi->txq_map[i];
q_teids[i] = vsi->tx_rings[i]->txq_teid;
/* clear cause_ena bit for disabled queues */
val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
val &= ~QINT_TQCTL_CAUSE_ENA_M;
wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
/* software is expected to wait for 100 ns */
ndelay(100);
/* trigger a software interrupt for the vector associated to
* the queue to schedule napi handler
*/
v_idx = vsi->tx_rings[i]->q_vector->v_idx;
wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
NULL);
if (status) {
dev_err(&pf->pdev->dev,
"Failed to disable LAN Tx queues, error: %d\n",
status);
err = -ENODEV;
}
err_out:
devm_kfree(&pf->pdev->dev, q_ids);
err_alloc_q_ids:
devm_kfree(&pf->pdev->dev, q_teids);
return err;
}
/**
* ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
* @pf: the PF being configured
* @pf_q: the PF queue
* @ena: enable or disable state of the queue
*
* This routine will wait for the given Rx queue of the PF to reach the
* enabled or disabled state.
 * Returns -ETIMEDOUT if the requested state is not reached after multiple
 * retries; otherwise returns 0 on success.
*/
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
int i;
for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
break;
usleep_range(10, 20);
}
if (i >= ICE_Q_WAIT_RETRY_LIMIT)
return -ETIMEDOUT;
return 0;
}
/**
* ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings
* @vsi: the VSI being configured
* @ena: start or stop the rx rings
*/
static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
int i, j, ret = 0;
for (i = 0; i < vsi->num_rxq; i++) {
int pf_q = vsi->rxq_map[i];
u32 rx_reg;
for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
rx_reg = rd32(hw, QRX_CTRL(pf_q));
if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
break;
usleep_range(1000, 2000);
}
/* Skip if the queue is already in the requested state */
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
continue;
/* turn on/off the queue */
if (ena)
rx_reg |= QRX_CTRL_QENA_REQ_M;
else
rx_reg &= ~QRX_CTRL_QENA_REQ_M;
wr32(hw, QRX_CTRL(pf_q), rx_reg);
/* wait for the change to finish */
ret = ice_pf_rxq_wait(pf, pf_q, ena);
if (ret) {
dev_err(&pf->pdev->dev,
"VSI idx %d Rx ring %d %sable timeout\n",
vsi->idx, pf_q, (ena ? "en" : "dis"));
break;
}
}
return ret;
}
/**
 * ice_vsi_start_rx_rings - start a VSI's Rx rings
* @vsi: the VSI whose rings are to be started
*
* Returns 0 on success and a negative value on error
*/
static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
{
return ice_vsi_ctrl_rx_rings(vsi, true);
}
/**
 * ice_vsi_stop_rx_rings - stop a VSI's Rx rings
* @vsi: the VSI
*
* Returns 0 on success and a negative value on error
*/
static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
{
return ice_vsi_ctrl_rx_rings(vsi, false);
}
/**
 * ice_vsi_stop_tx_rx_rings - stop a VSI's Tx and Rx rings
 * @vsi: the VSI
 *
 * Returns 0 on success and a negative value on error
*/
static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
{
int err_tx, err_rx;
err_tx = ice_vsi_stop_tx_rings(vsi);
if (err_tx)
dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n");
err_rx = ice_vsi_stop_rx_rings(vsi);
if (err_rx)
dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n");
if (err_tx || err_rx)
return -EIO;
return 0;
}
/**
* ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
* @vsi: the VSI being configured
*/
static void ice_napi_enable_all(struct ice_vsi *vsi)
{
int q_idx;
if (!vsi->netdev)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
napi_enable(&vsi->q_vectors[q_idx]->napi);
}
/**
* ice_up_complete - Finish the last steps of bringing up a connection
* @vsi: The VSI being configured
*
* Return 0 on success and negative value on error
*/
static int ice_up_complete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int err;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_vsi_cfg_msix(vsi);
else
return -ENOTSUPP;
/* Enable only Rx rings, Tx rings were enabled by the FW when the
* Tx queue group list was configured and the context bits were
* programmed using ice_vsi_cfg_txqs
*/
err = ice_vsi_start_rx_rings(vsi);
if (err)
return err;
clear_bit(__ICE_DOWN, vsi->state);
ice_napi_enable_all(vsi);
ice_vsi_ena_irq(vsi);
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
vsi->netdev) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
}
ice_service_task_schedule(pf);
return err;
}
/**
* ice_up - Bring the connection back up after being down
* @vsi: VSI being configured
*/
int ice_up(struct ice_vsi *vsi)
{
int err;
err = ice_vsi_cfg(vsi);
if (!err)
err = ice_up_complete(vsi);
return err;
}
/**
* ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
* @ring: Tx or Rx ring to read stats from
* @pkts: packets stats counter
* @bytes: bytes stats counter
*
 * This function fetches stats from the ring, using the atomic operations
 * needed to read u64 values consistently on 32-bit machines.
*/
static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
u64 *bytes)
{
unsigned int start;
*pkts = 0;
*bytes = 0;
if (!ring)
return;
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
*pkts = ring->stats.pkts;
*bytes = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
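/* The fetch/retry loop above pairs with writers that bump the ring
 * counters under the u64_stats seqcount, e.g. (a sketch of the usual
 * pattern, not a verbatim excerpt from the Tx/Rx clean-up paths):
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.pkts += total_pkts;
 *	ring->stats.bytes += total_bytes;
 *	u64_stats_update_end(&ring->syncp);
 *
 * On 64-bit kernels the begin/retry pair compiles to nothing; on
 * 32-bit kernels it keeps readers from seeing a torn u64.
 */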
/**
* ice_stat_update40 - read 40 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
* @hireg: high 32 bit HW register to read from
* @loreg: low 32 bit HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
bool prev_stat_loaded, u64 *prev_stat,
u64 *cur_stat)
{
u64 new_data;
new_data = rd32(hw, loreg);
new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
/* device stats are not reset at PFR, they likely will not be zeroed
* when the driver starts. So save the first values read and use them as
* offsets to be subtracted from the raw values in order to report stats
* that count from zero.
*/
if (!prev_stat_loaded)
*prev_stat = new_data;
if (likely(new_data >= *prev_stat))
*cur_stat = new_data - *prev_stat;
else
/* to manage the potential roll-over */
*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
*cur_stat &= 0xFFFFFFFFFFULL;
}
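/* A minimal sketch of the rollover arithmetic above, kept separate for
 * clarity (illustrative only; nothing in the driver calls it):
 */
static inline u64 ice_stat_delta40_example(u64 new_data, u64 prev_stat)
{
	u64 delta;

	if (new_data >= prev_stat)
		delta = new_data - prev_stat;
	else
		/* e.g. prev = 0xFFFFFFFF00, new = 0x80 -> delta = 0x180 */
		delta = (new_data + BIT_ULL(40)) - prev_stat;

	return delta & 0xFFFFFFFFFFULL; /* counter is only 40 bits wide */
}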
/**
* ice_stat_update32 - read 32 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
* @reg: HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat)
{
u32 new_data;
new_data = rd32(hw, reg);
/* device stats are not reset at PFR, they likely will not be zeroed
* when the driver starts. So save the first values read and use them as
* offsets to be subtracted from the raw values in order to report stats
* that count from zero.
*/
if (!prev_stat_loaded)
*prev_stat = new_data;
if (likely(new_data >= *prev_stat))
*cur_stat = new_data - *prev_stat;
else
/* to manage the potential roll-over */
*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}
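/* Rollover example for the 32-bit variant above: prev_stat = 0xFFFFFFF0
 * with a wrapped read of new_data = 0x10 gives
 * (0x10 + BIT_ULL(32)) - 0xFFFFFFF0 = 0x20, i.e. 32 events. No final
 * mask is needed because the register is a full 32 bits wide.
 */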
/**
* ice_update_eth_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
*/
static void ice_update_eth_stats(struct ice_vsi *vsi)
{
struct ice_eth_stats *prev_es, *cur_es;
struct ice_hw *hw = &vsi->back->hw;
u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
prev_es = &vsi->eth_stats_prev;
cur_es = &vsi->eth_stats;
ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
vsi->stat_offsets_loaded, &prev_es->rx_bytes,
&cur_es->rx_bytes);
ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
vsi->stat_offsets_loaded, &prev_es->rx_unicast,
&cur_es->rx_unicast);
ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
vsi->stat_offsets_loaded, &prev_es->rx_multicast,
&cur_es->rx_multicast);
ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
&cur_es->rx_broadcast);
ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_discards, &cur_es->rx_discards);
ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
vsi->stat_offsets_loaded, &prev_es->tx_bytes,
&cur_es->tx_bytes);
ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
vsi->stat_offsets_loaded, &prev_es->tx_unicast,
&cur_es->tx_unicast);
ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
vsi->stat_offsets_loaded, &prev_es->tx_multicast,
&cur_es->tx_multicast);
ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
&cur_es->tx_broadcast);
ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->tx_errors, &cur_es->tx_errors);
vsi->stat_offsets_loaded = true;
}
/**
* ice_update_vsi_ring_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
struct ice_ring *ring;
u64 pkts, bytes;
int i;
/* reset netdev stats */
vsi_stats->tx_packets = 0;
vsi_stats->tx_bytes = 0;
vsi_stats->rx_packets = 0;
vsi_stats->rx_bytes = 0;
/* reset non-netdev (extended) stats */
vsi->tx_restart = 0;
vsi->tx_busy = 0;
vsi->tx_linearize = 0;
vsi->rx_buf_failed = 0;
vsi->rx_page_failed = 0;
rcu_read_lock();
/* update Tx rings counters */
ice_for_each_txq(vsi, i) {
ring = READ_ONCE(vsi->tx_rings[i]);
ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
vsi->tx_busy += ring->tx_stats.tx_busy;
vsi->tx_linearize += ring->tx_stats.tx_linearize;
}
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
ring = READ_ONCE(vsi->rx_rings[i]);
ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
}
rcu_read_unlock();
}
/**
* ice_update_vsi_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
static void ice_update_vsi_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
struct ice_eth_stats *cur_es = &vsi->eth_stats;
struct ice_pf *pf = vsi->back;
if (test_bit(__ICE_DOWN, vsi->state) ||
test_bit(__ICE_CFG_BUSY, pf->state))
return;
/* get stats as recorded by Tx/Rx rings */
ice_update_vsi_ring_stats(vsi);
/* get VSI stats as recorded by the hardware */
ice_update_eth_stats(vsi);
cur_ns->tx_errors = cur_es->tx_errors;
cur_ns->rx_dropped = cur_es->rx_discards;
cur_ns->tx_dropped = cur_es->tx_discards;
cur_ns->multicast = cur_es->rx_multicast;
/* update some more netdev stats if this is main VSI */
if (vsi->type == ICE_VSI_PF) {
cur_ns->rx_crc_errors = pf->stats.crc_errors;
cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes;
cur_ns->rx_length_errors = pf->stats.rx_len_errors;
}
}
/**
* ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats need to be updated
*/
static void ice_update_pf_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
u8 pf_id;
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
pf_id = hw->pf_id;
ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
&cur_ps->eth.rx_bytes);
ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
&cur_ps->eth.rx_unicast);
ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
&cur_ps->eth.rx_multicast);
ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
&cur_ps->eth.rx_broadcast);
ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
&cur_ps->eth.tx_bytes);
ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
&cur_ps->eth.tx_unicast);
ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
&cur_ps->eth.tx_multicast);
ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
&cur_ps->eth.tx_broadcast);
ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
&prev_ps->tx_dropped_link_down,
&cur_ps->tx_dropped_link_down);
ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
pf->stat_prev_loaded, &prev_ps->rx_size_64,
&cur_ps->rx_size_64);
ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
pf->stat_prev_loaded, &prev_ps->rx_size_127,
&cur_ps->rx_size_127);
ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
pf->stat_prev_loaded, &prev_ps->rx_size_255,
&cur_ps->rx_size_255);
ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
pf->stat_prev_loaded, &prev_ps->rx_size_511,
&cur_ps->rx_size_511);
ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_size_big, &cur_ps->rx_size_big);
ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
pf->stat_prev_loaded, &prev_ps->tx_size_64,
&cur_ps->tx_size_64);
ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
pf->stat_prev_loaded, &prev_ps->tx_size_127,
&cur_ps->tx_size_127);
ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
pf->stat_prev_loaded, &prev_ps->tx_size_255,
&cur_ps->tx_size_255);
ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
pf->stat_prev_loaded, &prev_ps->tx_size_511,
&cur_ps->tx_size_511);
ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
&prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
&prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
&prev_ps->tx_size_big, &cur_ps->tx_size_big);
ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
&prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
&prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
&prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
&prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
&prev_ps->crc_errors, &cur_ps->crc_errors);
ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
&prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
&prev_ps->mac_local_faults,
&cur_ps->mac_local_faults);
ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_fragments, &cur_ps->rx_fragments);
ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_oversize, &cur_ps->rx_oversize);
ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_jabber, &cur_ps->rx_jabber);
pf->stat_prev_loaded = true;
}
/**
* ice_get_stats64 - get statistics for network device structure
* @netdev: network interface device structure
* @stats: main device statistics structure
*/
static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct rtnl_link_stats64 *vsi_stats;
struct ice_vsi *vsi = np->vsi;
vsi_stats = &vsi->net_stats;
if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)
return;
/* netdev packet/byte stats come from the ring counters and are
 * obtained by summing them up (done by ice_update_vsi_ring_stats).
*/
ice_update_vsi_ring_stats(vsi);
stats->tx_packets = vsi_stats->tx_packets;
stats->tx_bytes = vsi_stats->tx_bytes;
stats->rx_packets = vsi_stats->rx_packets;
stats->rx_bytes = vsi_stats->rx_bytes;
/* The rest of the stats can be read from the hardware but instead we
* just return values that the watchdog task has already obtained from
* the hardware.
*/
stats->multicast = vsi_stats->multicast;
stats->tx_errors = vsi_stats->tx_errors;
stats->tx_dropped = vsi_stats->tx_dropped;
stats->rx_errors = vsi_stats->rx_errors;
stats->rx_dropped = vsi_stats->rx_dropped;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
* ice_netpoll - polling "interrupt" handler
* @netdev: network interface device structure
*
* Used by netconsole to send skbs without having to re-enable interrupts.
* This is not called in the normal interrupt path.
*/
static void ice_netpoll(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i;
if (test_bit(__ICE_DOWN, vsi->state) ||
!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
return;
for (i = 0; i < vsi->num_q_vectors; i++)
ice_msix_clean_rings(0, vsi->q_vectors[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
* ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
* @vsi: VSI having NAPI disabled
*/
static void ice_napi_disable_all(struct ice_vsi *vsi)
{
int q_idx;
if (!vsi->netdev)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
napi_disable(&vsi->q_vectors[q_idx]->napi);
}
/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*/
int ice_down(struct ice_vsi *vsi)
{
int i, err;
/* Caller of this function is expected to set the
* vsi->state __ICE_DOWN bit
*/
if (vsi->netdev) {
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}
ice_vsi_dis_irq(vsi);
err = ice_vsi_stop_tx_rx_rings(vsi);
ice_napi_disable_all(vsi);
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
if (err)
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return err;
}
/**
* ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
* @vsi: VSI having resources allocated
*
* Return 0 on success, negative on failure
*/
static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
int i, err = 0;
if (!vsi->num_txq) {
dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
vsi->vsi_num);
return -EINVAL;
}
ice_for_each_txq(vsi, i) {
err = ice_setup_tx_ring(vsi->tx_rings[i]);
if (err)
break;
}
return err;
}
/**
* ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
* @vsi: VSI having resources allocated
*
* Return 0 on success, negative on failure
*/
static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
int i, err = 0;
if (!vsi->num_rxq) {
dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
vsi->vsi_num);
return -EINVAL;
}
ice_for_each_rxq(vsi, i) {
err = ice_setup_rx_ring(vsi->rx_rings[i]);
if (err)
break;
}
return err;
}
/**
* ice_vsi_req_irq - Request IRQ from the OS
* @vsi: The VSI IRQ is being requested for
* @basename: name for the vector
*
* Return 0 on success and a negative value on error
*/
static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
{
struct ice_pf *pf = vsi->back;
int err = -EINVAL;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
err = ice_vsi_req_irq_msix(vsi, basename);
return err;
}
/**
* ice_vsi_free_tx_rings - Free Tx resources for VSI queues
* @vsi: the VSI having resources freed
*/
static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
int i;
if (!vsi->tx_rings)
return;
ice_for_each_txq(vsi, i)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
ice_free_tx_ring(vsi->tx_rings[i]);
}
/**
* ice_vsi_free_rx_rings - Free Rx resources for VSI queues
* @vsi: the VSI having resources freed
*/
static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
int i;
if (!vsi->rx_rings)
return;
ice_for_each_rxq(vsi, i)
if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
ice_free_rx_ring(vsi->rx_rings[i]);
}
/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
* Initialization of the VSI
*
* Returns 0 on success, negative value on error
*/
static int ice_vsi_open(struct ice_vsi *vsi)
{
char int_name[ICE_INT_NAME_STR_LEN];
struct ice_pf *pf = vsi->back;
int err;
/* allocate descriptors */
err = ice_vsi_setup_tx_rings(vsi);
if (err)
goto err_setup_tx;
err = ice_vsi_setup_rx_rings(vsi);
if (err)
goto err_setup_rx;
err = ice_vsi_cfg(vsi);
if (err)
goto err_setup_rx;
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
err = ice_vsi_req_irq(vsi, int_name);
if (err)
goto err_setup_rx;
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
if (err)
goto err_set_qs;
err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
if (err)
goto err_set_qs;
err = ice_up_complete(vsi);
if (err)
goto err_up_complete;
return 0;
err_up_complete:
ice_down(vsi);
err_set_qs:
ice_vsi_free_irq(vsi);
err_setup_rx:
ice_vsi_free_rx_rings(vsi);
err_setup_tx:
ice_vsi_free_tx_rings(vsi);
return err;
}
/**
* ice_vsi_close - Shut down a VSI
* @vsi: the VSI being shut down
*/
static void ice_vsi_close(struct ice_vsi *vsi)
{
if (!test_and_set_bit(__ICE_DOWN, vsi->state))
ice_down(vsi);
ice_vsi_free_irq(vsi);
ice_vsi_free_tx_rings(vsi);
ice_vsi_free_rx_rings(vsi);
}
/**
 * ice_rss_clean - Delete RSS-related VSI structures that hold user inputs
* @vsi: the VSI being removed
*/
static void ice_rss_clean(struct ice_vsi *vsi)
{
struct ice_pf *pf;
pf = vsi->back;
if (vsi->rss_hkey_user)
devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
if (vsi->rss_lut_user)
devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
}
/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
* Returns 0 on success or < 0 on error
*/
static int ice_vsi_release(struct ice_vsi *vsi)
{
struct ice_pf *pf;
if (!vsi->back)
return -ENODEV;
pf = vsi->back;
if (vsi->netdev) {
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
/* Disable VSI and free resources */
ice_vsi_dis_irq(vsi);
ice_vsi_close(vsi);
/* reclaim interrupt vectors back to PF */
ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_msix += vsi->num_q_vectors;
ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_put_qs(vsi);
pf->q_left_tx += vsi->alloc_txq;
pf->q_left_rx += vsi->alloc_rxq;
ice_vsi_clear(vsi);
return 0;
}
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
*/
static void ice_dis_vsi(struct ice_vsi *vsi)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
set_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && netif_running(vsi->netdev) &&
vsi->type == ICE_VSI_PF)
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
ice_vsi_close(vsi);
}
/**
* ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
*/
static void ice_ena_vsi(struct ice_vsi *vsi)
{
if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
return;
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else if (ice_vsi_open(vsi))
/* ice_vsi_open() clears the __ICE_DOWN bit on success */
dev_dbg(&vsi->back->pdev->dev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
}
/**
* ice_pf_dis_all_vsi - Pause all VSIs on a PF
* @pf: the PF
*/
static void ice_pf_dis_all_vsi(struct ice_pf *pf)
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_dis_vsi(pf->vsi[v]);
}
/**
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
*/
static void ice_pf_ena_all_vsi(struct ice_pf *pf)
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_ena_vsi(pf->vsi[v]);
}
/**
* ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
*/
static void ice_rebuild(struct ice_pf *pf)
{
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
int err;
if (test_bit(__ICE_DOWN, pf->state))
goto clear_recovery;
dev_dbg(dev, "rebuilding pf\n");
ret = ice_init_all_ctrlq(hw);
if (ret) {
dev_err(dev, "control queues init failed %d\n", ret);
goto fail_reset;
}
ret = ice_clear_pf_cfg(hw);
if (ret) {
dev_err(dev, "clear PF configuration failed %d\n", ret);
goto fail_reset;
}
ice_clear_pxe_mode(hw);
ret = ice_get_caps(hw);
if (ret) {
dev_err(dev, "ice_get_caps failed %d\n", ret);
goto fail_reset;
}
/* basic nic switch setup */
err = ice_setup_pf_sw(pf);
if (err) {
dev_err(dev, "ice_setup_pf_sw failed\n");
goto fail_reset;
}
/* start misc vector */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "misc vector setup failed: %d\n", err);
goto fail_reset;
}
}
/* restart the VSIs that were rebuilt and running before the reset */
ice_pf_ena_all_vsi(pf);
return;
fail_reset:
ice_shutdown_all_ctrlq(hw);
set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
}
/**
* ice_change_mtu - NDO callback to change the MTU
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
*
* Returns 0 on success, negative on failure
*/
static int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u8 count = 0;
if (new_mtu == netdev->mtu) {
netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
return 0;
}
if (new_mtu < netdev->min_mtu) {
netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
netdev->min_mtu);
return -EINVAL;
} else if (new_mtu > netdev->max_mtu) {
netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
netdev->max_mtu);
return -EINVAL;
}
/* if a reset is in progress, wait for some time for it to complete */
do {
if (ice_is_reset_recovery_pending(pf->state)) {
count++;
usleep_range(1000, 2000);
} else {
break;
}
} while (count < 100);
if (count == 100) {
netdev_err(netdev, "can't change mtu. Device is busy\n");
return -EBUSY;
}
netdev->mtu = new_mtu;
/* if VSI is up, bring it down and then back up */
if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
int err;
err = ice_down(vsi);
if (err) {
netdev_err(netdev, "change mtu if_up err %d\n", err);
return err;
}
err = ice_up(vsi);
if (err) {
netdev_err(netdev, "change mtu if_up err %d\n", err);
return err;
}
}
netdev_dbg(netdev, "changed mtu to %d\n", new_mtu);
return 0;
}
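/* The reset-wait loop above retries up to 100 times with a 1-2 ms sleep
 * per attempt, so an MTU change gives a pending reset roughly 100-200 ms
 * to clear before failing with -EBUSY.
 */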
/**
* ice_set_rss - Set RSS keys and lut
* @vsi: Pointer to VSI structure
* @seed: RSS hash seed
* @lut: Lookup table
* @lut_size: Lookup table size
*
* Returns 0 on success, negative on failure
*/
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
if (seed) {
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot set RSS key, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
}
if (lut) {
status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
vsi->rss_lut_type, lut, lut_size);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot set RSS lut, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
}
return 0;
}
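/* Typical caller pattern (an illustrative sketch; the ethtool .set_rxfh
 * path is the expected consumer, and the rss_table_size/rss_size field
 * names below are assumptions about the VSI layout):
 *
 *	u8 *lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size,
 *			       GFP_KERNEL);
 *	if (!lut)
 *		return -ENOMEM;
 *	for (i = 0; i < vsi->rss_table_size; i++)
 *		lut[i] = i % vsi->rss_size;
 *	err = ice_set_rss(vsi, seed, lut, vsi->rss_table_size);
 *	devm_kfree(&pf->pdev->dev, lut);
 */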
/**
* ice_get_rss - Get RSS keys and lut
* @vsi: Pointer to VSI structure
* @seed: Buffer to store the keys
* @lut: Buffer to store the lookup table entries
* @lut_size: Size of buffer to store the lookup table entries
*
* Returns 0 on success, negative on failure
*/
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
if (seed) {
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot get RSS key, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
}
if (lut) {
status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
vsi->rss_lut_type, lut, lut_size);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot get RSS lut, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
}
return 0;
}
/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the netdev watchdog is enabled,
* and the stack is notified that the interface is ready.
*
* Returns 0 on success, negative value on failure
*/
static int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int err;
netif_carrier_off(netdev);
err = ice_vsi_open(vsi);
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return err;
}
/**
* ice_stop - Disables a network interface
* @netdev: network interface device structure
*
* The stop entry point is called when an interface is de-activated by the OS,
* and the netdevice enters the DOWN state. The hardware is still under the
* driver's control, but the netdev interface is disabled.
*
* Returns success only - not allowed to fail
*/
static int ice_stop(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
ice_vsi_close(vsi);
return 0;
}
/**
* ice_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buffer
* @netdev: This port's netdev
* @features: Offload features that the stack believes apply
*/
static netdev_features_t
ice_features_check(struct sk_buff *skb,
struct net_device __always_unused *netdev,
netdev_features_t features)
{
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
* being requested for this frame. We can rule out both by just
* checking for CHECKSUM_PARTIAL
*/
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
*/
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
features &= ~NETIF_F_GSO_MASK;
len = skb_network_header(skb) - skb->data;
if (len & ~(ICE_TXD_MACLEN_MAX))
goto out_rm_features;
len = skb_transport_header(skb) - skb_network_header(skb);
if (len & ~(ICE_TXD_IPLEN_MAX))
goto out_rm_features;
if (skb->encapsulation) {
len = skb_inner_network_header(skb) - skb_transport_header(skb);
if (len & ~(ICE_TXD_L4LEN_MAX))
goto out_rm_features;
len = skb_inner_transport_header(skb) -
skb_inner_network_header(skb);
if (len & ~(ICE_TXD_IPLEN_MAX))
goto out_rm_features;
}
return features;
out_rm_features:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
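/* The length checks above reject any header layout whose MAC, IP or L4
 * header lengths would overflow the corresponding Tx descriptor fields;
 * standard 14-byte Ethernet plus 20-byte IPv4 headers are expected to
 * pass, while an oversized stack of encapsulation headers falls through
 * to out_rm_features and is sent without checksum/GSO offload.
 */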
static const struct net_device_ops ice_netdev_ops = {
.ndo_open = ice_open,
.ndo_stop = ice_stop,
.ndo_start_xmit = ice_start_xmit,
.ndo_features_check = ice_features_check,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ice_netpoll,
#endif /* CONFIG_NET_POLL_CONTROLLER */
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
};