ice: Misc minor fixes

This is a collection of minor fixes including typos, white space, and
style. No functional changes.

Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Tony Nguyen 2020-07-29 17:19:22 -07:00
parent 6a2c2b2c1b
commit 7dbc63f0a5
9 changed files with 14 additions and 18 deletions

View File

@@ -1718,8 +1718,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
  * @num: number of resources
  * @res: pointer to array that contains the resources to free
  */
-enum ice_status
-ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
 {
 	struct ice_aqc_alloc_free_res_elem *buf;
 	enum ice_status status;
@@ -2121,7 +2120,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
  * @cap_count: the number of capabilities
  *
  * Helper device to parse device (0x000B) capabilities list. For
- * capabilities shared between device and device, this relies on
+ * capabilities shared between device and function, this relies on
  * ice_parse_common_caps.
  *
  * Loop through the list of provided capabilities and extract the relevant
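The kernel-doc in this hunk describes the usual capability-parsing pattern: walk the list returned by firmware, dispatch on each capability ID, and defer anything shared between device and function to a common parser. A minimal sketch of that pattern in plain C follows; the struct layout, capability IDs, and function names are illustrative assumptions, not the driver's actual ice_parse_dev_caps or ice_parse_common_caps definitions.

/* Illustrative sketch only: hypothetical types, IDs, and names, not ice code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cap_elem {
	uint16_t cap;		/* capability ID reported by firmware */
	uint32_t number;	/* capability value */
};

/* capabilities shared between device and function are handled in one place */
static bool parse_common_cap(uint16_t cap, uint32_t number)
{
	switch (cap) {
	case 0x0043:	/* hypothetical shared capability */
		printf("common cap 0x%04x = %u\n", (unsigned)cap, (unsigned)number);
		return true;
	default:
		return false;
	}
}

static void parse_dev_caps(const struct cap_elem *buf, uint32_t cap_count)
{
	uint32_t i;

	/* loop through the provided list and extract the relevant capabilities */
	for (i = 0; i < cap_count; i++) {
		switch (buf[i].cap) {
		case 0x0046:	/* hypothetical device-only capability */
			printf("device cap 0x%04x = %u\n",
			       (unsigned)buf[i].cap, (unsigned)buf[i].number);
			break;
		default:
			/* fall back to the shared device/function parser */
			if (!parse_common_cap(buf[i].cap, buf[i].number))
				printf("cap 0x%04x ignored\n", (unsigned)buf[i].cap);
			break;
		}
	}
}

int main(void)
{
	struct cap_elem caps[] = { { 0x0043, 8 }, { 0x0046, 1 }, { 0x00ff, 0 } };

	parse_dev_caps(caps, 3);
	return 0;
}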

View File

@@ -357,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
  *
  * Create and register a devlink_port for this PF. Note that although each
  * physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function id.
+ * will still be numbered according to the physical function ID.
  *
  * Return: zero on success or an error code on failure.
  */

View File

@@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
  * This function generates a key from a value, a don't care mask and a never
  * match mask.
  * upd, dc, and nm are optional parameters, and can be NULL:
- *	upd == NULL --> udp mask is all 1's (update all bits)
+ *	upd == NULL --> upd mask is all 1's (update all bits)
  *	dc == NULL --> dc mask is all 0's (no don't care bits)
  *	nm == NULL --> nm mask is all 0's (no never match bits)
  */
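The NULL-handling rules listed above can be made concrete with a short sketch. This is conceptual C only, assuming a simple byte-wise key; the function name, the separate never-match output, and the byte handling are illustrative assumptions and do not reproduce the real ice_set_key key/key-inverse encoding.

#include <stddef.h>
#include <stdint.h>

/* Overlay 'val' onto 'key' for 'len' bytes starting at 'off', honoring the
 * optional masks exactly as the defaults above describe: bits not selected
 * by upd are left untouched, don't-care bits are cleared, and never-match
 * bits are recorded in a separate 'never' mask.
 */
static void set_key_sketch(uint8_t *key, uint8_t *never, const uint8_t *val,
			   const uint8_t *upd, const uint8_t *dc,
			   const uint8_t *nm, size_t off, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		uint8_t u = upd ? upd[i] : 0xff;	/* NULL: update all bits */
		uint8_t d = dc ? dc[i] : 0x00;		/* NULL: no don't care bits */
		uint8_t n = nm ? nm[i] : 0x00;		/* NULL: no never match bits */
		uint8_t byte = key[off + i];

		byte = (byte & ~u) | (val[i] & u);	/* update only selected bits */
		byte &= (uint8_t)~d;			/* don't care bits compare as 0 */
		key[off + i] = byte;
		never[off + i] = n;			/* track never match bits */
	}
}

With all three masks NULL the call degenerates to a plain copy of val into the key, which is exactly what the documented defaults describe.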

View File

@@ -57,7 +57,7 @@
 #define PRTDCB_GENS 0x00083020
 #define PRTDCB_GENS_DCBX_STATUS_S 0
 #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
-#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
+#define PRTDCB_TUP2TC 0x001D26C0
 #define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
 #define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
 #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
@@ -362,6 +362,7 @@
 #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
 #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
 #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define PRTRPB_RDPC 0x000AC260
 #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
 #define VSIQF_FD_CNT_FD_GCNT_S 0
 #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
@@ -378,6 +379,5 @@
 #define PFPM_WUS_FW_RST_WK_M BIT(31)
 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
 #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
-#define PRTRPB_RDPC 0x000AC260
 #endif /* _ICE_HW_AUTOGEN_H_ */

View File

@@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
 		return ICE_ERR_PARAM;
 	}
-	/* query the current node information from FW before additing it
+	/* query the current node information from FW before adding it
 	 * to the SW DB
 	 */
 	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
@@ -578,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
 /**
  * ice_aq_rl_profile - performs a rate limiting task
  * @hw: pointer to the HW struct
- * @opcode:opcode for add, query, or remove profile(s)
+ * @opcode: opcode for add, query, or remove profile(s)
  * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes

View File

@@ -631,9 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
 	dma_addr_t dma;
 	/* since we are recycling buffers we should seldom need to alloc */
-	if (likely(page)) {
+	if (likely(page))
 		return true;
-	}
 	/* alloc new page for storage */
 	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
@@ -1252,12 +1251,12 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
  * @itr: ITR value to update
  *
  * Calculate how big of an increment should be applied to the ITR value passed
- * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * in based on wmem_default, SKB overhead, ethernet overhead, and the current
 * link speed.
 *
 * The following is a calculation derived from:
 *  wmem_default / (size + overhead) = desired_pkts_per_int
- *  rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 *
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
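Plugging numbers into the formulas quoted in this comment gives a quick sanity check of the derivation. The standalone calculation below is not driver code; wmem_default (212992) and the 640-byte overhead come from the comment itself, while the frame size, link rate, and the 24-byte Ethernet-overhead figure are example assumptions.

#include <stdio.h>

int main(void)
{
	const double wmem_default = 212992.0;	/* from the comment above */
	const double overhead = 640.0;		/* SKB overhead, per the comment */
	const double eth_overhead = 24.0;	/* assumed: preamble + FCS + IFG */
	const double size = 1514.0;		/* example full-sized frame */
	const double rate = 25e9;		/* example 25 Gb/s link */
	const double bits_per_byte = 8.0;
	const double usecs_per_sec = 1e6;

	/* wmem_default / (size + overhead) = desired_pkts_per_int */
	double desired_pkts_per_int = wmem_default / (size + overhead);
	/* rate / bits_per_byte / (size + ethernet overhead) = pkt_rate */
	double pkt_rate = rate / bits_per_byte / (size + eth_overhead);
	/* (desired_pkts_per_int / pkt_rate) * usecs_per_sec = ITR value */
	double itr = desired_pkts_per_int / pkt_rate * usecs_per_sec;

	/* roughly 98.9 pkts/interrupt, ~2.0 Mpps, ~49 usec ITR for this example */
	printf("pkts/int=%.1f pkt_rate=%.0f itr=%.1f usec\n",
	       desired_pkts_per_int, pkt_rate, itr);
	return 0;
}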

View File

@@ -321,7 +321,7 @@ struct ice_nvm_info {
 	u32 flash_size; /* Size of available flash in bytes */
 	u8 major_ver; /* major version of NVM package */
 	u8 minor_ver; /* minor version of dev starter */
-	u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+	u8 blank_nvm_mode; /* is NVM empty (no FW present) */
 };
 struct ice_link_default_override_tlv {

View File

@@ -2974,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		vsi->max_frame = qpi->rxq.max_pkt_size;
 	}
-	/* VF can request to configure less than allocated queues
-	 * or default allocated queues. So update the VSI with new number
+	/* VF can request to configure less than allocated queues or default
+	 * allocated queues. So update the VSI with new number
 	 */
 	vsi->num_txq = num_txq;
 	vsi->num_rxq = num_rxq;

View File

@@ -298,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
 	}
 }
 
-
 /**
  * ice_xsk_umem_disable - disable a UMEM region
  * @vsi: Current VSI
@@ -594,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
 		if (!size)
 			break;
 
 		rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
-
 		rx_buf->xdp->data_end = rx_buf->xdp->data + size;
 		xsk_buff_dma_sync_for_cpu(rx_buf->xdp);