mirror of https://gitee.com/openkylin/linux.git
Merge branch 'cxgb4-cxgb4vf-fix-warnings-reported-by-sparse'
Rahul Lakkireddy says:

====================
cxgb4/cxgb4vf: fix warnings reported by sparse

This series of patches fixes various warnings reported by the sparse tool.

Patches 1 and 2 fix lock context imbalance warnings.
Patch 3 fixes a cast to restricted __be64 warning when fetching the timestamp in the PTP path.
Patch 4 fixes several cast to restricted __be32 warnings in the TC-U32 offload parser.
Patch 5 fixes several cast from restricted __be16 warnings in parsing L4 ports for filters.
Patch 6 fixes several restricted __be32 degrades to integer warnings when comparing IP address masks for exact-match filters.
Patch 7 fixes a cast to restricted __be64 warning when fetching SGE queue contexts in device dump collection.
Patch 8 fixes a cast from restricted __sum16 warning when saving the IPv4 partial checksum.
Patch 9 fixes an issue with string array scope in the DCB path.
Patch 10 fixes a set but unused variable warning when DCB is disabled.
Patch 11 fixes several kernel-doc comment warnings in the cxgb4 driver.
Patch 12 fixes several kernel-doc comment warnings in the cxgb4vf driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a83024b95a
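As background for the hunks that follow: sparse tracks the __be16/__be32/__be64 annotations, so mixing a host-order value with a big-endian one without an explicit conversion triggers the "cast to/from restricted" warnings this series removes. A minimal illustration (not taken from the series itself):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static inline void store_be32(__be32 *dst, u32 host_val)
    {
        /* *dst = host_val;  -- sparse: incorrect type in assignment */
        *dst = cpu_to_be32(host_val);   /* explicit conversion, no warning */
    }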
@@ -1975,7 +1975,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
 	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
 	struct cudbg_buffer temp_buff = { 0 };
 	struct cudbg_ch_cntxt *buff;
-	u64 *dst_off, *src_off;
 	u8 *ctx_buf;
 	u8 i, k;
 	int rc;
@@ -2044,8 +2043,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
 		}
 
 		for (j = 0; j < max_ctx_qid; j++) {
+			__be64 *dst_off;
+			u64 *src_off;
+
 			src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
-			dst_off = (u64 *)buff->data;
+			dst_off = (__be64 *)buff->data;
 
 			/* The data is stored in 64-bit cpu order. Convert it
 			 * to big endian before parsing.
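The second hunk above is the patch 7 fix: the destination pointer is declared __be64 so that each converted store carries the right annotation. The same pattern in isolation (helper name is ours, not the driver's):

    static void ctx_words_to_be64(__be64 *dst, const u64 *src, unsigned int nwords)
    {
        unsigned int i;

        for (i = 0; i < nwords; i++)
            dst[i] = cpu_to_be64(src[i]);   /* __be64 store: no restricted-cast warning */
    }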
@@ -136,6 +136,9 @@ static inline __u8 bitswap_1(unsigned char val)
 	       ((val & 0x02) << 5) |
 	       ((val & 0x01) << 7);
 }
+
+extern const char * const dcb_ver_array[];
+
 #define CXGB4_DCB_ENABLED true
 
 #else /* !CONFIG_CHELSIO_T4_DCB */
@@ -2379,7 +2379,6 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
 };
 
 #ifdef CONFIG_CHELSIO_T4_DCB
-extern char *dcb_ver_array[];
 
 /* Data Center Briging information for each port.
  */
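Patch 9 moves the dcb_ver_array declaration out of the debugfs code and into the shared DCB header with const qualification, as the two hunks above show. The usual shape of such a fix, with illustrative strings (the real table lives in the DCB implementation file):

    /* definition in one compilation unit */
    const char * const dcb_ver_array[] = { "unknown", "CEE", "IEEE", "auto" };

    /* single const-correct declaration in the shared header */
    extern const char * const dcb_ver_array[];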
@@ -588,7 +588,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
 /**
  * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
  * capabilities
- * @et_lmm: ethtool Link Mode Mask
+ * @link_mode_mask: ethtool Link Mode Mask
  *
  * Translate ethtool Link Mode Mask into a Firmware Port capabilities
  * value.
@@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
 			   unsigned int tid, bool dip, bool sip, bool dp,
 			   bool sp)
 {
+	u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+	u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
 	if (dip) {
 		if (f->fs.type) {
 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
@@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
 	}
 
 	set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
-		      (dp ? f->fs.nat_lport : 0) |
-		      (sp ? f->fs.nat_fport << 16 : 0), 1);
+		      (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
+		      (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
+		      1);
 }
 
 /* Validate filter spec against configuration done on the card. */
@@ -909,6 +913,9 @@ int set_filter_wr(struct adapter *adapter, int fidx)
 	fwr->fpm = htons(f->fs.mask.fport);
 
 	if (adapter->params.filter2_wr_support) {
+		u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+		u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
 		fwr->natmode_to_ulp_type =
 			FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
 						 ULP_MODE_TCPDDP :
@@ -916,8 +923,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
 				 FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
 		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
 		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
-		fwr->newlport = htons(f->fs.nat_lport);
-		fwr->newfport = htons(f->fs.nat_fport);
+		fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
+		fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
 	}
 
 	/* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -1105,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
 		struct in_addr *addr;
 
 		addr = (struct in_addr *)ipmask;
-		if (addr->s_addr == 0xffffffff)
+		if (ntohl(addr->s_addr) == 0xffffffff)
 			return true;
 	} else if (family == AF_INET6) {
 		struct in6_addr *addr6;
 
 		addr6 = (struct in6_addr *)ipmask;
-		if (addr6->s6_addr32[0] == 0xffffffff &&
-		    addr6->s6_addr32[1] == 0xffffffff &&
-		    addr6->s6_addr32[2] == 0xffffffff &&
-		    addr6->s6_addr32[3] == 0xffffffff)
+		if (ntohl(addr6->s6_addr32[0]) == 0xffffffff &&
+		    ntohl(addr6->s6_addr32[1]) == 0xffffffff &&
+		    ntohl(addr6->s6_addr32[2]) == 0xffffffff &&
+		    ntohl(addr6->s6_addr32[3]) == 0xffffffff)
 			return true;
 	}
 	return false;
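Two sparse classes are handled here: the NAT port fields are accessed as raw bytes and reassembled with shifts before htons(), and the all-ones address-mask comparisons go through ntohl() so no __be32 silently degrades to an integer. A stand-alone sketch of the port handling (helper name is ours; byte order of the stored pair as implied by the hunks above):

    static inline __be16 stored_port_to_wire(const u8 *p)
    {
        return htons(p[1] | p[0] << 8); /* p[0] holds the high byte of the stored pair */
    }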
@@ -449,7 +449,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
  *	      or -1
  * @addr: the new MAC address value
  * @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @smt_idx: the destination to store the new SMT index.
  *
  * Modifies an MPS filter and sets it to the new MAC address if
  * @tcam_idx >= 0, or adds the MAC address to a new filter if
@@ -1615,6 +1615,7 @@ static int tid_init(struct tid_info *t)
  * @stid: the server TID
  * @sip: local IP address to bind server to
  * @sport: the server's TCP port
+ * @vlan: the VLAN header information
  * @queue: queue to direct messages from this server to
  *
  * Create an IP server for the given port and address.
@@ -2609,7 +2610,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
 
 	/* Clear out filter specifications */
 	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
-	f->fs.val.lport = cpu_to_be16(sport);
+	f->fs.val.lport = be16_to_cpu(sport);
 	f->fs.mask.lport = ~0;
 	val = (u8 *)&sip;
 	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
@@ -5377,10 +5378,10 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 static int cfg_queues(struct adapter *adap)
 {
 	u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
-	u32 i, n10g = 0, qidx = 0, n1g = 0;
 	u32 ncpus = num_online_cpus();
 	u32 niqflint, neq, num_ulds;
 	struct sge *s = &adap->sge;
+	u32 i, n10g = 0, qidx = 0;
 	u32 q10g = 0, q1g;
 
 	/* Reduce memory usage in kdump environment, disable all offload. */
@@ -5426,7 +5427,6 @@ static int cfg_queues(struct adapter *adap)
 	if (n10g)
 		q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
 
-	n1g = adap->params.nports - n10g;
 #ifdef CONFIG_CHELSIO_T4_DCB
 	/* For Data Center Bridging support we need to be able to support up
 	 * to 8 Traffic Priorities; each of which will be assigned to its
@@ -5444,7 +5444,8 @@ static int cfg_queues(struct adapter *adap)
 	else
 		q10g = max(8U, q10g);
 
-	while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+	while ((q10g * n10g) >
+	       (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
 		q10g--;
 
 #else /* !CONFIG_CHELSIO_T4_DCB */
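Patch 10's warning comes from a variable that was only read inside the CONFIG_CHELSIO_T4_DCB block; the hunks above drop it and use the expression in place. Roughly (variable names from the hunks above):

    /* With CONFIG_CHELSIO_T4_DCB unset, this assignment had no reader:
     *     u32 n1g = adap->params.nports - n10g;   -> "set but not used"
     * so the expression is folded into the loop condition instead:
     */
    while ((q10g * n10g) > (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
        q10g--;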
@@ -194,6 +194,7 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi)
 }
 
 /**
  * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter
+ * @ptp: ptp clock structure
  * @ppb: Desired frequency change in parts per billion
  *
@@ -229,7 +230,7 @@ static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 
 /**
  * cxgb4_ptp_fineadjtime - Shift the time of the hardware clock
- * @ptp: ptp clock structure
+ * @adapter: board private structure
  * @delta: Desired change in nanoseconds
  *
  * Adjust the timer by resetting the timecounter structure.
@@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = {
 	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
 	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
 	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
-	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
-	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
-	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
-	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
 };
 
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
@@ -156,14 +152,14 @@ static void cxgb4_process_flow_match(struct net_device *dev,
 		struct flow_match_ports match;
 
 		flow_rule_match_ports(rule, &match);
-		fs->val.lport = cpu_to_be16(match.key->dst);
-		fs->mask.lport = cpu_to_be16(match.mask->dst);
-		fs->val.fport = cpu_to_be16(match.key->src);
-		fs->mask.fport = cpu_to_be16(match.mask->src);
+		fs->val.lport = be16_to_cpu(match.key->dst);
+		fs->mask.lport = be16_to_cpu(match.mask->dst);
+		fs->val.fport = be16_to_cpu(match.key->src);
+		fs->mask.fport = be16_to_cpu(match.mask->src);
 
 		/* also initialize nat_lport/fport to same values */
-		fs->nat_lport = cpu_to_be16(match.key->dst);
-		fs->nat_fport = cpu_to_be16(match.key->src);
+		fs->nat_lport = fs->val.lport;
+		fs->nat_fport = fs->val.fport;
 	}
 
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
@@ -354,12 +350,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
 		switch (offset) {
 		case PEDIT_TCP_SPORT_DPORT:
 			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-				offload_pedit(fs, cpu_to_be32(val) >> 16,
-					      cpu_to_be32(mask) >> 16,
-					      TCP_SPORT);
+				fs->nat_fport = val;
 			else
-				offload_pedit(fs, cpu_to_be32(val),
-					      cpu_to_be32(mask), TCP_DPORT);
+				fs->nat_lport = val >> 16;
 		}
 		fs->nat_mode = NAT_MODE_ALL;
 		break;
@@ -367,12 +360,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
 		switch (offset) {
 		case PEDIT_UDP_SPORT_DPORT:
 			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-				offload_pedit(fs, cpu_to_be32(val) >> 16,
-					      cpu_to_be32(mask) >> 16,
-					      UDP_SPORT);
+				fs->nat_fport = val;
 			else
-				offload_pedit(fs, cpu_to_be32(val),
-					      cpu_to_be32(mask), UDP_DPORT);
+				fs->nat_lport = val >> 16;
 		}
 		fs->nat_mode = NAT_MODE_ALL;
 	}
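In the pedit hunks the two L4 ports arrive packed in one 32-bit action word, so the driver now takes the halves directly in host order instead of converting with cpu_to_be32() and shifting back. Roughly, with the split as implied by the hunks above (helper name is ours):

    static void split_pedit_port_word(u32 val, u16 *sport, u16 *dport)
    {
        *sport = val & 0xffff;  /* low half, used when the SPORT mask matches */
        *dport = val >> 16;     /* high half otherwise */
    }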
@@ -48,7 +48,7 @@ static int fill_match_fields(struct adapter *adap,
 			     bool next_header)
 {
 	unsigned int i, j;
-	u32 val, mask;
+	__be32 val, mask;
 	int off, err;
 	bool found;
 
@@ -228,7 +228,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
 		const struct cxgb4_next_header *next;
 		bool found = false;
 		unsigned int i, j;
-		u32 val, mask;
+		__be32 val, mask;
 		int off;
 
 		if (t->table[link_uhtid - 1].link_handle) {
@@ -242,10 +242,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
 
 		/* Try to find matches that allow jumps to next header. */
 		for (i = 0; next[i].jump; i++) {
-			if (next[i].offoff != cls->knode.sel->offoff ||
-			    next[i].shift != cls->knode.sel->offshift ||
-			    next[i].mask != cls->knode.sel->offmask ||
-			    next[i].offset != cls->knode.sel->off)
+			if (next[i].sel.offoff != cls->knode.sel->offoff ||
+			    next[i].sel.offshift != cls->knode.sel->offshift ||
+			    next[i].sel.offmask != cls->knode.sel->offmask ||
+			    next[i].sel.off != cls->knode.sel->off)
 				continue;
 
 			/* Found a possible candidate. Find a key that
@@ -257,9 +257,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
 				val = cls->knode.sel->keys[j].val;
 				mask = cls->knode.sel->keys[j].mask;
 
-				if (next[i].match_off == off &&
-				    next[i].match_val == val &&
-				    next[i].match_mask == mask) {
+				if (next[i].key.off == off &&
+				    next[i].key.val == val &&
+				    next[i].key.mask == mask) {
 					found = true;
 					break;
 				}
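The u32 classifier hands the driver key values that are already in network byte order; keeping them in __be32 variables and comparing against cpu_to_be32() constants is enough to silence sparse. For example, a TCP match on the IPv4 protocol byte would look like this (helper name is ours):

    static bool ipv4_word2_is_tcp(__be32 val, __be32 mask)
    {
        /* protocol occupies bits 23:16 of the third 32-bit IPv4 word
         * (TTL | protocol | checksum); mask assumed to be 0x00ff0000
         */
        return (val & mask) == cpu_to_be32(0x00060000);
    }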
@@ -38,12 +38,12 @@
 struct cxgb4_match_field {
 	int off; /* Offset from the beginning of the header to match */
 	/* Fill the value/mask pair in the spec if matched */
-	int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+	int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask);
 };
 
 /* IPv4 match fields */
 static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
-				      u32 val, u32 mask)
+				      __be32 val, __be32 mask)
 {
 	f->val.tos = (ntohl(val) >> 16) & 0x000000FF;
 	f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
@@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
-				       u32 val, u32 mask)
+				       __be32 val, __be32 mask)
 {
 	u32 mask_val;
 	u8 frag_val;
@@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
-					u32 val, u32 mask)
+					__be32 val, __be32 mask)
 {
 	f->val.proto = (ntohl(val) >> 16) & 0x000000FF;
 	f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
@@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
-					 u32 val, u32 mask)
+					 __be32 val, __be32 mask)
 {
 	memcpy(&f->val.fip[0], &val, sizeof(u32));
 	memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
-					 u32 val, u32 mask)
+					 __be32 val, __be32 mask)
 {
 	memcpy(&f->val.lip[0], &val, sizeof(u32));
 	memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
 
 /* IPv6 match fields */
 static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
-				      u32 val, u32 mask)
+				      __be32 val, __be32 mask)
 {
 	f->val.tos = (ntohl(val) >> 20) & 0x000000FF;
 	f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
@@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
-					u32 val, u32 mask)
+					__be32 val, __be32 mask)
 {
 	f->val.proto = (ntohl(val) >> 8) & 0x000000FF;
 	f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
@@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
-					  u32 val, u32 mask)
+					  __be32 val, __be32 mask)
 {
 	memcpy(&f->val.fip[0], &val, sizeof(u32));
 	memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
-					  u32 val, u32 mask)
+					  __be32 val, __be32 mask)
 {
 	memcpy(&f->val.fip[4], &val, sizeof(u32));
 	memcpy(&f->mask.fip[4], &mask, sizeof(u32));
@@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
-					  u32 val, u32 mask)
+					  __be32 val, __be32 mask)
 {
 	memcpy(&f->val.fip[8], &val, sizeof(u32));
 	memcpy(&f->mask.fip[8], &mask, sizeof(u32));
@@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
-					  u32 val, u32 mask)
+					  __be32 val, __be32 mask)
 {
 	memcpy(&f->val.fip[12], &val, sizeof(u32));
 	memcpy(&f->mask.fip[12], &mask, sizeof(u32));
@@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
-					  u32 val, u32 mask)
+					  __be32 val, __be32 mask)
 {
 	memcpy(&f->val.lip[0], &val, sizeof(u32));
 	memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
-					  u32 val, u32 mask)
+					  __be32 val, __be32 mask)
 {
 	memcpy(&f->val.lip[4], &val, sizeof(u32));
 	memcpy(&f->mask.lip[4], &mask, sizeof(u32));
@@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
-					  u32 val, u32 mask)
+					  __be32 val, __be32 mask)
 {
 	memcpy(&f->val.lip[8], &val, sizeof(u32));
 	memcpy(&f->mask.lip[8], &mask, sizeof(u32));
@@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
-					  u32 val, u32 mask)
+					  __be32 val, __be32 mask)
 {
 	memcpy(&f->val.lip[12], &val, sizeof(u32));
 	memcpy(&f->mask.lip[12], &mask, sizeof(u32));
@@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
 
 /* TCP/UDP match */
 static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
-				      u32 val, u32 mask)
+				      __be32 val, __be32 mask)
 {
 	f->val.fport = ntohl(val) >> 16;
 	f->mask.fport = ntohl(mask) >> 16;
@@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = {
 };
 
 struct cxgb4_next_header {
-	unsigned int offset; /* Offset to next header */
-	/* offset, shift, and mask added to offset above
+	/* Offset, shift, and mask added to beginning of the header
 	 * to get to next header. Useful when using a header
 	 * field's value to jump to next header such as IHL field
 	 * in IPv4 header.
 	 */
-	unsigned int offoff;
-	u32 shift;
-	u32 mask;
-	/* match criteria to make this jump */
-	unsigned int match_off;
-	u32 match_val;
-	u32 match_mask;
+	struct tc_u32_sel sel;
+	struct tc_u32_key key;
 	/* location of jump to make */
 	const struct cxgb4_match_field *jump;
 };
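Embedding the uapi selector and key structures means every comparison in cxgb4_config_knode() is between identically annotated types, so no casts are needed. For reference, the embedded key type (from include/uapi/linux/pkt_cls.h) carries the endian annotations directly:

    struct tc_u32_key {
        __be32  mask;
        __be32  val;
        int     off;
        int     offmask;
    };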
@@ -258,26 +252,74 @@ struct cxgb4_next_header {
  * IPv4 header.
  */
 static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
-	{ .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
-	  .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
-	  .jump = cxgb4_tcp_fields },
-	{ .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
-	  .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
-	  .jump = cxgb4_udp_fields },
-	{ .jump = NULL }
+	{
+		/* TCP Jump */
+		.sel = {
+			.off = 0,
+			.offoff = 0,
+			.offshift = 6,
+			.offmask = cpu_to_be16(0x0f00),
+		},
+		.key = {
+			.off = 8,
+			.val = cpu_to_be32(0x00060000),
+			.mask = cpu_to_be32(0x00ff0000),
+		},
+		.jump = cxgb4_tcp_fields,
+	},
+	{
+		/* UDP Jump */
+		.sel = {
+			.off = 0,
+			.offoff = 0,
+			.offshift = 6,
+			.offmask = cpu_to_be16(0x0f00),
+		},
+		.key = {
+			.off = 8,
+			.val = cpu_to_be32(0x00110000),
+			.mask = cpu_to_be32(0x00ff0000),
+		},
+		.jump = cxgb4_udp_fields,
+	},
+	{ .jump = NULL },
 };
 
 /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header
  * to get to transport layer header.
  */
 static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
-	{ .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
-	  .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
-	  .jump = cxgb4_tcp_fields },
-	{ .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
-	  .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
-	  .jump = cxgb4_udp_fields },
-	{ .jump = NULL }
+	{
+		/* TCP Jump */
+		.sel = {
+			.off = 40,
+			.offoff = 0,
+			.offshift = 0,
+			.offmask = 0,
+		},
+		.key = {
+			.off = 4,
+			.val = cpu_to_be32(0x00000600),
+			.mask = cpu_to_be32(0x0000ff00),
+		},
+		.jump = cxgb4_tcp_fields,
+	},
+	{
+		/* UDP Jump */
+		.sel = {
+			.off = 40,
+			.offoff = 0,
+			.offshift = 0,
+			.offmask = 0,
+		},
+		.key = {
+			.off = 4,
+			.val = cpu_to_be32(0x00001100),
+			.mask = cpu_to_be32(0x0000ff00),
+		},
+		.jump = cxgb4_udp_fields,
+	},
+	{ .jump = NULL },
 };
 
 struct cxgb4_link {
@@ -502,41 +502,20 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
 }
 EXPORT_SYMBOL(cxgb4_select_ntuple);
 
-/*
- * Called when address resolution fails for an L2T entry to handle packets
- * on the arpq head. If a packet specifies a failure handler it is invoked,
- * otherwise the packet is sent to the device.
- */
-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
-{
-	struct sk_buff *skb;
-
-	while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
-		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
-
-		spin_unlock(&e->lock);
-		if (cb->arp_err_handler)
-			cb->arp_err_handler(cb->handle, skb);
-		else
-			t4_ofld_send(adap, skb);
-		spin_lock(&e->lock);
-	}
-}
-
 /*
  * Called when the host's neighbor layer makes a change to some entry that is
  * loaded into the HW L2 table.
  */
 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
 {
-	struct l2t_entry *e;
-	struct sk_buff_head *arpq = NULL;
-	struct l2t_data *d = adap->l2t;
 	unsigned int addr_len = neigh->tbl->key_len;
 	u32 *addr = (u32 *) neigh->primary_key;
-	int ifidx = neigh->dev->ifindex;
-	int hash = addr_hash(d, addr, addr_len, ifidx);
+	int hash, ifidx = neigh->dev->ifindex;
+	struct sk_buff_head *arpq = NULL;
+	struct l2t_data *d = adap->l2t;
+	struct l2t_entry *e;
 
+	hash = addr_hash(d, addr, addr_len, ifidx);
 	read_lock_bh(&d->lock);
 	for (e = d->l2tab[hash].first; e; e = e->next)
 		if (!addreq(e, addr) && e->ifindex == ifidx) {
@@ -569,8 +548,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
 		write_l2e(adap, e, 0);
 	}
 
-	if (arpq)
-		handle_failed_resolution(adap, e);
+	if (arpq) {
+		struct sk_buff *skb;
+
+		/* Called when address resolution fails for an L2T
+		 * entry to handle packets on the arpq head. If a
+		 * packet specifies a failure handler it is invoked,
+		 * otherwise the packet is sent to the device.
+		 */
+		while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
+			const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+			spin_unlock(&e->lock);
+			if (cb->arp_err_handler)
+				cb->arp_err_handler(cb->handle, skb);
+			else
+				t4_ofld_send(adap, skb);
+			spin_lock(&e->lock);
+		}
+	}
+
 	spin_unlock_bh(&e->lock);
 }
 
@@ -613,6 +609,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
 }
 
 /**
  * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
+ * @dev: net_device pointer
  * @vlan: VLAN Id
  * @port: Associated port
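Sparse checks lock balance per function, so a helper that releases and re-takes its caller's lock looks imbalanced on both sides; folding the arpq walk back into t4_l2t_update(), as above, keeps acquire and release in one scope. A reduced sketch of the problematic shape and the annotation that would otherwise be needed (names are ours, not the driver's):

    /* Called with *lock already held by the caller; without the
     * __must_hold() annotation, sparse reports a context imbalance here.
     */
    static void drain_queue(spinlock_t *lock, struct sk_buff_head *q)
        __must_hold(lock)
    {
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(q)) != NULL) {
            spin_unlock(lock);  /* drop the lock around the per-skb work */
            kfree_skb(skb);     /* stand-in for the real handler */
            spin_lock(lock);
        }
    }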
@@ -598,7 +598,7 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
 /**
  * cxgb4_sched_class_free - free a scheduling class
  * @dev: net_device pointer
- * @e: scheduling class
+ * @classid: scheduling class id to free
  *
  * Frees a scheduling class if there are no users.
  */
@@ -302,7 +302,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
 
 /**
  * free_tx_desc - reclaims Tx descriptors and their buffers
- * @adapter: the adapter
+ * @adap: the adapter
  * @q: the Tx queue to reclaim descriptors from
  * @n: the number of descriptors to reclaim
  * @unmap: whether the buffers should be unmapped for DMA
@@ -722,6 +722,7 @@ static inline unsigned int flits_to_desc(unsigned int n)
 /**
  * is_eth_imm - can an Ethernet packet be sent as immediate data?
  * @skb: the packet
+ * @chip_ver: chip version
  *
  * Returns whether an Ethernet packet is small enough to fit as
  * immediate data. Return value corresponds to headroom required.
@@ -749,6 +750,7 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
 /**
  * calc_tx_flits - calculate the number of flits for a packet Tx WR
  * @skb: the packet
+ * @chip_ver: chip version
  *
  * Returns the number of flits needed for a Tx WR for the given Ethernet
  * packet, including the needed WR and CPL headers.
@@ -804,6 +806,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
 /**
  * calc_tx_descs - calculate the number of Tx descriptors for a packet
  * @skb: the packet
+ * @chip_ver: chip version
  *
  * Returns the number of Tx descriptors needed for the given Ethernet
  * packet, including the needed WR and CPL headers.
@@ -1425,12 +1428,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	qidx = skb_get_queue_mapping(skb);
 	if (ptp_enabled) {
-		spin_lock(&adap->ptp_lock);
 		if (!(adap->ptp_tx_skb)) {
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 			adap->ptp_tx_skb = skb_get(skb);
 		} else {
-			spin_unlock(&adap->ptp_lock);
 			goto out_free;
 		}
 		q = &adap->sge.ptptxq;
@@ -1444,11 +1445,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 #ifdef CONFIG_CHELSIO_T4_FCOE
 	ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
-	if (unlikely(ret == -ENOTSUPP)) {
-		if (ptp_enabled)
-			spin_unlock(&adap->ptp_lock);
+	if (unlikely(ret == -EOPNOTSUPP))
 		goto out_free;
-	}
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 
 	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
@@ -1461,8 +1459,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev_err(adap->pdev_dev,
 			"%s: Tx ring %u full while queue awake!\n",
 			dev->name, qidx);
-		if (ptp_enabled)
-			spin_unlock(&adap->ptp_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1481,8 +1477,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
 		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
 		q->mapping_err++;
-		if (ptp_enabled)
-			spin_unlock(&adap->ptp_lock);
 		goto out_free;
 	}
 
@@ -1533,8 +1527,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (iph->version == 4) {
 			iph->check = 0;
 			iph->tot_len = 0;
-			iph->check = (u16)(~ip_fast_csum((u8 *)iph,
-							 iph->ihl));
+			iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
 		}
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			cntrl = hwcsum(adap->params.chip, skb);
@@ -1630,8 +1623,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	txq_advance(&q->q, ndesc);
 
 	cxgb4_ring_tx_db(adap, &q->q, ndesc);
-	if (ptp_enabled)
-		spin_unlock(&adap->ptp_lock);
 	return NETDEV_TX_OK;
 
 out_free:
@@ -2377,6 +2368,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(qid >= pi->nqsets))
 		return cxgb4_ethofld_xmit(skb, dev);
 
+	if (is_ptp_enabled(skb, dev)) {
+		struct adapter *adap = netdev2adap(dev);
+		netdev_tx_t ret;
+
+		spin_lock(&adap->ptp_lock);
+		ret = cxgb4_eth_xmit(skb, dev);
+		spin_unlock(&adap->ptp_lock);
+		return ret;
+	}
+
 	return cxgb4_eth_xmit(skb, dev);
 }
 
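With the lock taken in t4_start_xmit() around the whole call, cxgb4_eth_xmit() no longer has exit paths that conditionally release a lock it may not hold, which is what sparse objected to. The alternative, when a function intentionally returns with changed lock state, is to annotate it; general kernel sparse practice, not used by this patch:

    static void take_ptp(spinlock_t *lock) __acquires(lock)
    {
        spin_lock(lock);    /* sparse accepts the asymmetric exit because of the annotation */
    }

    static void put_ptp(spinlock_t *lock) __releases(lock)
    {
        spin_unlock(lock);
    }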
@@ -2410,9 +2411,9 @@ static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
 
 /**
  * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
- * @dev - netdevice
- * @eotid - ETHOFLD tid to bind/unbind
- * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
  *
  * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
  * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
@@ -2691,7 +2692,6 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 
 /**
  * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
- * @adap: the adapter
  * @q: the queue to stop
  *
  * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
@@ -3286,7 +3286,7 @@ enum {
 
 /**
  * t4_systim_to_hwstamp - read hardware time stamp
- * @adap: the adapter
+ * @adapter: the adapter
  * @skb: the packet
 *
 * Read Time Stamp from MPS packet and insert in skb which
@@ -3313,15 +3313,16 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
 
 	hwtstamps = skb_hwtstamps(skb);
 	memset(hwtstamps, 0, sizeof(*hwtstamps));
-	hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+	hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
 
 	return RX_PTP_PKT_SUC;
 }
 
 /**
  * t4_rx_hststamp - Recv PTP Event Message
- * @adap: the adapter
+ * @adapter: the adapter
  * @rsp: the response queue descriptor holding the RX_PKT message
+ * @rxq: the response queue holding the RX_PKT message
 * @skb: the packet
 *
 * PTP enabled and MPS packet, read HW timestamp
@@ -3345,7 +3346,7 @@ static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
 
 /**
  * t4_tx_hststamp - Loopback PTP Transmit Event Message
- * @adap: the adapter
+ * @adapter: the adapter
  * @skb: the packet
  * @dev: the ingress net device
  *
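The timestamp fix above replaces a (u64 *) dereference-and-swap with the unaligned accessor, which both satisfies sparse and is safe for packet data that need not be 8-byte aligned:

    #include <asm/unaligned.h>

    static u64 read_be64_timestamp(const void *data)
    {
        return get_unaligned_be64(data);    /* returns a host-order u64 */
    }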
@@ -103,6 +103,7 @@ static void t4_smte_free(struct smt_entry *e)
 }
 
 /**
  * cxgb4_smt_release - Release SMT entry
+ * @e: smt entry to release
  *
  * Releases ref count and frees up an smt entry from SMT table
@@ -231,6 +232,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
 }
 
 /**
  * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
+ * @dev: net_device pointer
  * @smac: MAC address to add to SMT
  * Returns pointer to the SMT entry created
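The kernel-doc fixes in patches 11 and 12 all follow the same rule: every parameter of the documented function needs a matching "@name:" line, with names that exactly match the prototype. A minimal well-formed example (hypothetical function, ours):

    /**
     * example_sum - add two values
     * @a: first addend
     * @b: second addend
     *
     * Return: the sum of @a and @b.
     */
    static int example_sum(int a, int b)
    {
        return a + b;
    }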
@@ -3163,7 +3163,7 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers)
 
 /**
  * t4_get_exprom_version - return the Expansion ROM version (if any)
- * @adapter: the adapter
+ * @adap: the adapter
  * @vers: where to place the version
 *
 * Reads the Expansion ROM header from FLASH and returns the version
@@ -5310,7 +5310,7 @@ static unsigned int t4_use_ldst(struct adapter *adap)
  * @cmd: TP fw ldst address space type
  * @vals: where the indirect register values are stored/written
  * @nregs: how many indirect registers to read/write
- * @start_idx: index of first indirect register to read/write
+ * @start_index: index of first indirect register to read/write
  * @rw: Read (1) or Write (0)
  * @sleep_ok: if true we may sleep while awaiting command completion
 *
@@ -6115,7 +6115,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 
 /**
  * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
- * @adap: the adapter
+ * @adapter: the adapter
  * @pidx: the port index
 *
 * Computes and returns a bitmap indicating which MPS buffer groups are
@@ -6252,7 +6252,7 @@ static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
 
 /**
  * t4_get_tp_ch_map - return TP ingress channels associated with a port
- * @adapter: the adapter
+ * @adap: the adapter
  * @pidx: the port index
 *
 * Returns a bitmap indicating which TP Ingress Channels are associated
@@ -6589,7 +6589,7 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
  * @phy_addr: the PHY address
  * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
  * @reg: the register to write
- * @valp: value to write
+ * @val: value to write
 *
 * Issues a FW command through the given mailbox to write a PHY register.
 */
@@ -6615,7 +6615,7 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 
 /**
  * t4_sge_decode_idma_state - decode the idma state
- * @adap: the adapter
+ * @adapter: the adapter
  * @state: the state idma is stuck in
 */
 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
@@ -6782,7 +6782,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  * t4_sge_ctxt_flush - flush the SGE context cache
  * @adap: the adapter
  * @mbox: mailbox to use for the FW command
- * @ctx_type: Egress or Ingress
+ * @ctxt_type: Egress or Ingress
 *
 * Issues a FW command through the given mailbox to flush the
 * SGE context cache.
@@ -6809,7 +6809,7 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 
 /**
  * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
- * @adap - the adapter
+ * @adap: the adapter
  * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
  * @dbqtimers: SGE Doorbell Queue Timer table
 *
@@ -7092,6 +7092,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 /**
  * t4_fw_restart - restart the firmware by taking the uP out of RESET
  * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
  * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by t4_fw_halt(). On successful
@@ -7630,6 +7631,8 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
  * @nmac: number of MAC addresses needed (1 to 5)
  * @mac: the MAC addresses of the VI
  * @rss_size: size of RSS table slice associated with this VI
+ * @vivld: the destination to store the VI Valid value.
+ * @vin: the destination to store the VIN value.
 *
 * Allocates a virtual interface for the given physical port. If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
@@ -7848,7 +7851,7 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
  * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
  * @adap: the adapter
  * @viid: the VI id
- * @mac: the MAC address
+ * @addr: the MAC address
  * @mask: the mask
  * @vni: the VNI id for the tunnel protocol
  * @vni_mask: mask for the VNI id
@@ -7897,11 +7900,11 @@ int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
  * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
  * @adap: the adapter
  * @viid: the VI id
- * @mac: the MAC address
+ * @addr: the MAC address
  * @mask: the mask
  * @idx: index at which to add this entry
- * @port_id: the port index
  * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
  * @sleep_ok: call is allowed to sleep
 *
 * Adds the mac entry at the specified index using raw mac interface.
@@ -8126,7 +8129,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
  * @idx: index of existing filter for old value of MAC address, or -1
  * @addr: the new MAC address value
  * @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @smt_idx: the destination to store the new SMT index.
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
@@ -8448,7 +8451,6 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 
 /**
  * t4_link_down_rc_str - return a string for a Link Down Reason Code
- * @adap: the adapter
  * @link_down_rc: Link Down Reason Code
 *
 * Returns a string representation of the Link Down Reason Code.
@@ -8472,9 +8474,7 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc)
 	return reason[link_down_rc];
 }
 
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
 {
 	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -9110,7 +9110,6 @@ static int t4_get_flash_params(struct adapter *adap)
 /**
  * t4_prep_adapter - prepare SW and HW for operation
  * @adapter: the adapter
- * @reset: if true perform a HW reset
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
@@ -10395,6 +10394,7 @@ int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
 /**
  * t4_i2c_rd - read I2C data from adapter
  * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
  * @port: Port number if per-port device; <0 if not
  * @devid: per-port device ID or absolute device ID
  * @offset: byte offset into device I2C space
@@ -10450,7 +10450,7 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
 
 /**
  * t4_set_vlan_acl - Set a VLAN id for the specified VF
- * @adapter: the adapter
+ * @adap: the adapter
  * @mbox: mailbox to use for the FW command
  * @vf: one of the VFs instantiated by the specified PF
  * @vlan: The vlanid to be set
@@ -260,8 +260,7 @@ static int cxgb4vf_set_addr_hash(struct port_info *pi)
  * @tcam_idx: TCAM index of existing filter for old value of MAC address,
  *	      or -1
  * @addr: the new MAC address value
- * @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @persistent: whether a new MAC allocation should be persistent
 *
 * Modifies an MPS filter and sets it to the new MAC address if
 * @tcam_idx >= 0, or adds the MAC address to a new filter if
@@ -1692,7 +1692,7 @@ static inline bool is_new_response(const struct rsp_ctrl *rc,
  * restore_rx_bufs - put back a packet's RX buffers
  * @gl: the packet gather list
  * @fl: the SGE Free List
- * @nfrags: how many fragments in @si
+ * @frags: how many fragments in @si
 *
 * Called when we find out that the current packet, @si, can't be
 * processed right away for some reason. This is a very rare event and
@@ -2054,7 +2054,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
 
 /**
  * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
- * @data: the adapter
+ * @t: Rx timer
 *
 * Runs periodically from a timer to perform maintenance of SGE RX queues.
 *
@@ -2113,7 +2113,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
 
 /**
  * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
- * @data: the adapter
+ * @t: Tx timer
 *
 * Runs periodically from a timer to perform maintenance of SGE TX queues.
 *
@@ -2405,6 +2405,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
  * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
  * @adapter: the adapter
  * @txq: pointer to the new txq to be filled in
+ * @dev: the network device
  * @devq: the network TX queue associated with the new txq
  * @iqid: the relative ingress queue ID to which events relating to
  *	  the new txq should be directed
@@ -389,9 +389,7 @@ static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
 	return cc_fec;
 }
 
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
 {
 	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -1467,6 +1465,7 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
  * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
  * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
  *	    -1 no change
+ * @sleep_ok: call is allowed to sleep
 *
 * Sets Rx properties of a virtual interface.
 */
@@ -1906,7 +1905,7 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
 /**
  * t4vf_handle_get_port_info - process a FW reply message
  * @pi: the port info
- * @rpl: start of the FW message
+ * @cmd: start of the FW message
 *
 * Processes a GET_PORT_INFO FW reply message.
 */
@@ -2137,8 +2136,6 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
 	return 0;
 }
 
-/**
- */
 int t4vf_prep_adapter(struct adapter *adapter)
 {
 	int err;