Merge gitolite.kernel.org:/pub/scm/linux/kernel/git/davem/net

Dave writes:
  "Networking fixes:

   1) Fix multiqueue handling of coalesce timer in stmmac, from Jose Abreu.

   2) Fix memory corruption in NFC, from Suren Baghdasaryan.

   3) Don't write reserved bits in ravb driver, from Kazuya Mizuguchi.

   4) SMC bug fixes from Karsten Graul, YueHaibing, and Ursula Braun.

   5) Fix TX done race in mvpp2, from Antoine Tenart.

   6) ipv6 metrics leak, from Wei Wang.

   7) Adjust firmware version requirements in mlxsw, from Petr Machata.

   8) Fix autonegotiation on resume in r8169, from Heiner Kallweit.

   9) Fixed missing entries when dumping /proc/net/if_inet6, from
      Jeff Barnhill.

  10) Fix double free in devlink, from Dan Carpenter.

  11) Fix ethtool regression from UFO feature removal, from Maciej
      Żenczykowski.

  12) Fix drivers that have a ndo_poll_controller() that captures the
      cpu entirely on loaded hosts by trying to drain all rx and tx
      queues, from Eric Dumazet.

  13) Fix memory corruption with jumbo frames in aquantia driver, from
      Friedemann Gerold."

* gitolite.kernel.org:/pub/scm/linux/kernel/git/davem/net: (79 commits)
  net: mvneta: fix the remaining Rx descriptor unmapping issues
  ip_tunnel: be careful when accessing the inner header
  mpls: allow routes on ip6gre devices
  net: aquantia: memory corruption on jumbo frames
  tun: remove ndo_poll_controller
  nfp: remove ndo_poll_controller
  bnxt: remove ndo_poll_controller
  bnx2x: remove ndo_poll_controller
  mlx5: remove ndo_poll_controller
  mlx4: remove ndo_poll_controller
  i40evf: remove ndo_poll_controller
  ice: remove ndo_poll_controller
  igb: remove ndo_poll_controller
  ixgb: remove ndo_poll_controller
  fm10k: remove ndo_poll_controller
  ixgbevf: remove ndo_poll_controller
  ixgbe: remove ndo_poll_controller
  bonding: use netpoll_poll_dev() helper
  netpoll: make ndo_poll_controller() optional
  rds: Fix build regression.
  ...
commit 2dd68cc7fd
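Most of the driver churn below is one transformation repeated across many drivers: the per-driver ndo_poll_controller() implementations are deleted, and netpoll instead falls back to a core helper. The following is a minimal, hedged sketch of the pattern — `example_netpoll` and `example_priv` are hypothetical stand-ins, not any driver in this merge; `netpoll_poll_dev()` is the real core helper that the bonding hunk below switches to.

```c
/* Old pattern (removed throughout this merge): each driver's netpoll
 * "interrupt" handler scheduled NAPI on every queue, which on a loaded
 * host could capture the CPU while trying to drain all rx/tx queues.
 */
#ifdef CONFIG_NET_POLL_CONTROLLER
static void example_netpoll(struct net_device *netdev)
{
	struct example_priv *priv = netdev_priv(netdev);	/* hypothetical */
	int i;

	for (i = 0; i < priv->num_q_vectors; i++)
		napi_schedule(&priv->q_vector[i].napi);
}
#endif

/* New pattern: drop .ndo_poll_controller from net_device_ops entirely;
 * callers (e.g. bonding below) poll through the core helper instead:
 *
 *	netpoll_poll_dev(slave->dev);
 */
```

The reconstructed diff follows.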
@@ -971,16 +971,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
 	struct slave *slave = NULL;
 	struct list_head *iter;
 	struct ad_info ad_info;
-	struct netpoll_info *ni;
-	const struct net_device_ops *ops;
 
 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 		if (bond_3ad_get_active_agg_info(bond, &ad_info))
 			return;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		ops = slave->dev->netdev_ops;
-		if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+		if (!bond_slave_is_up(slave))
 			continue;
 
 		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -992,11 +989,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
 			continue;
 		}
 
-		ni = rcu_dereference_bh(slave->dev->npinfo);
-		if (down_trylock(&ni->dev_lock))
-			continue;
-		ops->ndo_poll_controller(slave->dev);
-		up(&ni->dev_lock);
+		netpoll_poll_dev(slave->dev);
 	}
 }

@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
 static void bmac_set_timeout(struct net_device *dev);
 static void bmac_tx_timeout(struct timer_list *t);
-static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
 static void bmac_start(struct net_device *dev);
 
 #define	DBDMA_SET(x)	( ((x) | (x) << 16) )
@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
 	spin_unlock_irqrestore(&bp->lock, flags);
 }
 
-static int
+static netdev_tx_t
 bmac_output(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bmac_data *bp = netdev_priv(dev);

@@ -78,7 +78,7 @@ struct mace_data {
 
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
 static void mace_set_multicast(struct net_device *dev);
 static void mace_reset(struct net_device *dev);
 static int mace_set_address(struct net_device *dev, void *addr);
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
 	mp->timeout_active = 1;
 }
 
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mace_data *mp = netdev_priv(dev);
 	volatile struct dbdma_regs __iomem *td = mp->tx_dma;

@@ -89,7 +89,7 @@ struct mace_frame {
 
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
 static void mace_set_multicast(struct net_device *dev);
 static int mace_set_address(struct net_device *dev, void *addr);
 static void mace_reset(struct net_device *dev);
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
  * Transmit a frame
  */
 
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mace_data *mp = netdev_priv(dev);
 	unsigned long flags;

@@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 		}
 
 		/* for single fragment packets use build_skb() */
-		if (buff->is_eop) {
+		if (buff->is_eop &&
+		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
 			skb = build_skb(page_address(buff->page),
-					buff->len + AQ_SKB_ALIGN);
+					AQ_CFG_RX_FRAME_MAX);
 			if (unlikely(!skb)) {
 				err = -ENOMEM;
 				goto err_exit;
@@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 					buff->len - ETH_HLEN,
 					SKB_TRUESIZE(buff->len - ETH_HLEN));
 
-			for (i = 1U, next_ = buff->next,
-			     buff_ = &self->buff_ring[next_]; true;
-			     next_ = buff_->next,
-			     buff_ = &self->buff_ring[next_], ++i) {
-				skb_add_rx_frag(skb, i, buff_->page, 0,
-						buff_->len,
-						SKB_TRUESIZE(buff->len -
-							     ETH_HLEN));
-				buff_->is_cleaned = 1;
+			if (!buff->is_eop) {
+				for (i = 1U, next_ = buff->next,
+				     buff_ = &self->buff_ring[next_];
+				     true; next_ = buff_->next,
+				     buff_ = &self->buff_ring[next_], ++i) {
+					skb_add_rx_frag(skb, i,
+							buff_->page, 0,
+							buff_->len,
+							SKB_TRUESIZE(buff->len -
+								     ETH_HLEN));
+					buff_->is_cleaned = 1;
 
-				if (buff_->is_eop)
-					break;
+					if (buff_->is_eop)
+						break;
+				}
 			}
 		}
 

@@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	}
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void poll_bnx2x(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int i;
-
-	for_each_eth_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-	}
-}
-#endif
-
 static int bnx2x_validate_addr(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_tx_timeout		= bnx2x_tx_timeout,
 	.ndo_vlan_rx_add_vid	= bnx2x_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= bnx2x_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= poll_bnx2x,
-#endif
 	.ndo_setup_tc		= __bnx2x_setup_tc,
 #ifdef CONFIG_BNX2X_SRIOV
 	.ndo_set_vf_mac		= bnx2x_set_vf_mac,

@@ -7672,21 +7672,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
 	bnxt_queue_sp_work(bp);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bnxt_poll_controller(struct net_device *dev)
-{
-	struct bnxt *bp = netdev_priv(dev);
-	int i;
-
-	/* Only process tx rings/combined rings in netpoll mode. */
-	for (i = 0; i < bp->tx_nr_rings; i++) {
-		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
-
-		napi_schedule(&txr->bnapi->napi);
-	}
-}
-#endif
-
 static void bnxt_timer(struct timer_list *t)
 {
 	struct bnxt *bp = from_timer(bp, t, timer);
@@ -8519,9 +8504,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
 	.ndo_set_vf_trust	= bnxt_set_vf_trust,
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= bnxt_poll_controller,
-#endif
 	.ndo_setup_tc		= bnxt_setup_tc,
 #ifdef CONFIG_RFS_ACCEL

@@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 		}
 	}
 
+	if (i == ARRAY_SIZE(nvm_params))
+		return -EOPNOTSUPP;
+
 	if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
 		idx = bp->pf.port_id;
 	else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)

@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
 	return 0;
 }
 
-static void bnxt_tc_parse_vlan(struct bnxt *bp,
-			       struct bnxt_tc_actions *actions,
-			       const struct tc_action *tc_act)
+static int bnxt_tc_parse_vlan(struct bnxt *bp,
+			      struct bnxt_tc_actions *actions,
+			      const struct tc_action *tc_act)
 {
-	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+	switch (tcf_vlan_action(tc_act)) {
+	case TCA_VLAN_ACT_POP:
 		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
-	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+		break;
+	case TCA_VLAN_ACT_PUSH:
 		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
 		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
 		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+		break;
+	default:
+		return -EOPNOTSUPP;
 	}
+	return 0;
 }
 
 static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 
 		/* Push/pop VLAN */
 		if (is_tcf_vlan(tc_act)) {
-			bnxt_tc_parse_vlan(bp, actions, tc_act);
+			rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+			if (rc)
+				return rc;
 			continue;
 		}
 

@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
 	return rx;
 }
 
-static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ep93xx_priv *ep = netdev_priv(dev);
 	struct ep93xx_tdesc *txd;

@@ -113,7 +113,7 @@ struct net_local {
 
 /* Index to functions, as function prototypes. */
 static int net_open(struct net_device *dev);
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t net_interrupt(int irq, void *dev_id);
 static void set_multicast_list(struct net_device *dev);
 static void net_rx(struct net_device *dev);
@@ -324,7 +324,7 @@ net_open(struct net_device *dev)
 	return 0;
 }
 
-static int
+static netdev_tx_t
 net_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net_local *lp = netdev_priv(dev);

@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
 #define RX_AREA_END	0x0fc00
 
 static int ether1_open(struct net_device *dev);
-static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
+				     struct net_device *dev);
 static irqreturn_t ether1_interrupt(int irq, void *dev_id);
 static int ether1_close(struct net_device *dev);
 static void ether1_setmulticastlist(struct net_device *dev);
@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-static int
+static netdev_tx_t
 ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
 {
 	int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;

@@ -347,7 +347,7 @@ static const char init_setup[] =
 	0x7f /*	*multi IA */ };
 
 static int i596_open(struct net_device *dev);
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t i596_interrupt(int irq, void *dev_id);
 static int i596_close(struct net_device *dev);
 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
 }
 
 
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct i596_private *lp = netdev_priv(dev);
 	struct tx_cmd *tx_cmd;

@@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
 static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
 static int sun3_82586_open(struct net_device *dev);
 static int sun3_82586_close(struct net_device *dev);
-static int sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
+					  struct net_device *);
 static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
 static void sun3_82586_timeout(struct net_device *dev);
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
  * send frame
  */
 
-static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
 	int len,i;
 #ifndef NO_NOPCOMMANDS

@@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev)
 	if (of_phy_is_fixed_link(np)) {
 		int res = emac_dt_mdio_probe(dev);
 
-		if (!res) {
-			res = of_phy_register_fixed_link(np);
-			if (res)
-				mdiobus_unregister(dev->mii_bus);
+		if (res)
+			return res;
+
+		res = of_phy_register_fixed_link(np);
+		dev->phy_dev = of_phy_find_device(np);
+		if (res || !dev->phy_dev) {
+			mdiobus_unregister(dev->mii_bus);
+			return res ? res : -EINVAL;
 		}
-		return res;
+		emac_adjust_link(dev->ndev);
+		put_device(&dev->phy_dev->mdio.dev);
 	}
 	return 0;
 }

@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
 void fm10k_service_event_schedule(struct fm10k_intfc *interface);
 void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
 void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-void fm10k_netpoll(struct net_device *netdev);
-#endif
 
 /* Netdev */
 struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);

@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
 	.ndo_udp_tunnel_del	= fm10k_udp_tunnel_del,
 	.ndo_dfwd_add_station	= fm10k_dfwd_add_station,
 	.ndo_dfwd_del_station	= fm10k_dfwd_del_station,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= fm10k_netpoll,
-#endif
 	.ndo_features_check	= fm10k_features_check,
 };
 

@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fm10k_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-void fm10k_netpoll(struct net_device *netdev)
-{
-	struct fm10k_intfc *interface = netdev_priv(netdev);
-	int i;
-
-	/* if interface is down do nothing */
-	if (test_bit(__FM10K_DOWN, interface->state))
-		return;
-
-	for (i = 0; i < interface->num_q_vectors; i++)
-		fm10k_msix_clean_rings(0, interface->q_vector[i]);
-}
-
-#endif
 #define FM10K_ERR_MSG(type) case (type): error = #type; break
 static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
 			       struct fm10k_fault *fault)

@@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
 	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * i40evf_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-static void i40evf_netpoll(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-	int i;
-
-	/* if interface is down do nothing */
-	if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
-		return;
-
-	for (i = 0; i < q_vectors; i++)
-		i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
-}
-
-#endif
 /**
  * i40evf_irq_affinity_notify - Callback for affinity changes
  * @notify: context as to what irq was changed
@@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
 	.ndo_features_check	= i40evf_features_check,
 	.ndo_fix_features	= i40evf_fix_features,
 	.ndo_set_features	= i40evf_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= i40evf_netpoll,
-#endif
 	.ndo_setup_tc		= i40evf_setup_tc,
 };
 

@@ -4806,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 	stats->rx_length_errors = vsi_stats->rx_length_errors;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * ice_netpoll - polling "interrupt" handler
- * @netdev: network interface device structure
- *
- * Used by netconsole to send skbs without having to re-enable interrupts.
- * This is not called in the normal interrupt path.
- */
-static void ice_netpoll(struct net_device *netdev)
-{
-	struct ice_netdev_priv *np = netdev_priv(netdev);
-	struct ice_vsi *vsi = np->vsi;
-	struct ice_pf *pf = vsi->back;
-	int i;
-
-	if (test_bit(__ICE_DOWN, vsi->state) ||
-	    !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-		return;
-
-	for (i = 0; i < vsi->num_q_vectors; i++)
-		ice_msix_clean_rings(0, vsi->q_vectors[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 /**
  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
  * @vsi: VSI having NAPI disabled
@@ -5497,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = {
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_change_mtu = ice_change_mtu,
 	.ndo_get_stats64 = ice_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = ice_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
 	.ndo_set_features = ice_set_features,

@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = {
 	.priority	= 0
 };
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void igb_netpoll(struct net_device *);
-#endif
 #ifdef CONFIG_PCI_IOV
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
 	.ndo_set_vf_trust	= igb_ndo_set_vf_trust,
 	.ndo_get_vf_config	= igb_ndo_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= igb_netpoll,
-#endif
 	.ndo_fix_features	= igb_fix_features,
 	.ndo_set_features	= igb_set_features,
 	.ndo_fdb_add		= igb_ndo_fdb_add,
@@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
 	return 0;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igb_netpoll(struct net_device *netdev)
-{
-	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
-	struct igb_q_vector *q_vector;
-	int i;
-
-	for (i = 0; i < adapter->num_q_vectors; i++) {
-		q_vector = adapter->q_vector[i];
-		if (adapter->flags & IGB_FLAG_HAS_MSIX)
-			wr32(E1000_EIMC, q_vector->eims_value);
-		else
-			igb_irq_disable(adapter);
-		napi_schedule(&q_vector->napi);
-	}
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 /**
  *  igb_io_error_detected - called when PCI error is detected
  *  @pdev: Pointer to PCI device

@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
 				 __be16 proto, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void ixgb_netpoll(struct net_device *dev);
-#endif
-
 static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
 						enum pci_channel_state state);
 static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
 	.ndo_tx_timeout		= ixgb_tx_timeout,
 	.ndo_vlan_rx_add_vid	= ixgb_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ixgb_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ixgb_netpoll,
-#endif
 	.ndo_fix_features       = ixgb_fix_features,
 	.ndo_set_features       = ixgb_set_features,
 };
@@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
 		ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-
-static void ixgb_netpoll(struct net_device *dev)
-{
-	struct ixgb_adapter *adapter = netdev_priv(dev);
-
-	disable_irq(adapter->pdev->irq);
-	ixgb_intr(adapter->pdev->irq, dev);
-	enable_irq(adapter->pdev->irq);
-}
-#endif
-
 /**
  * ixgb_io_error_detected - called when PCI error is detected
  * @pdev:    pointer to pci device with error

@@ -8768,28 +8768,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
 	return err;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbe_netpoll(struct net_device *netdev)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int i;
-
-	/* if interface is down do nothing */
-	if (test_bit(__IXGBE_DOWN, &adapter->state))
-		return;
-
-	/* loop through and schedule all active queues */
-	for (i = 0; i < adapter->num_q_vectors; i++)
-		ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
-}
-
-#endif
-
 static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
 				   struct ixgbe_ring *ring)
 {
@@ -10251,9 +10229,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
 	.ndo_get_stats64	= ixgbe_get_stats64,
 	.ndo_setup_tc		= __ixgbe_setup_tc,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ixgbe_netpoll,
-#endif
 #ifdef IXGBE_FCOE
 	.ndo_select_queue	= ixgbe_select_queue,
 	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,

@@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 	return 0;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbevf_netpoll(struct net_device *netdev)
-{
-	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-	int i;
-
-	/* if interface is down do nothing */
-	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
-		return;
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
 	.ndo_tx_timeout		= ixgbevf_tx_timeout,
 	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ixgbevf_netpoll,
-#endif
 	.ndo_features_check	= ixgbevf_features_check,
 	.ndo_bpf		= ixgbevf_xdp,
 };

@@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 		if (!data || !(rx_desc->buf_phys_addr))
 			continue;
 
-		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+		dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(data);
 	}
 }
@@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 				skb_add_rx_frag(rxq->skb, frag_num, page,
 						frag_offset, frag_size,
 						PAGE_SIZE);
-				dma_unmap_single(dev->dev.parent, phys_addr,
-						 PAGE_SIZE, DMA_FROM_DEVICE);
+				dma_unmap_page(dev->dev.parent, phys_addr,
+					       PAGE_SIZE, DMA_FROM_DEVICE);
 				rxq->left_size -= frag_size;
 			}
 		} else {
@@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 						frag_offset, frag_size,
 						PAGE_SIZE);
 
-				dma_unmap_single(dev->dev.parent, phys_addr,
-						 PAGE_SIZE,
-						 DMA_FROM_DEVICE);
+				dma_unmap_page(dev->dev.parent, phys_addr,
+					       PAGE_SIZE, DMA_FROM_DEVICE);
 
 				rxq->left_size -= frag_size;
 			}

@@ -3055,10 +3055,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
 			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
 	}
 
-	cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
-	if (cause_tx) {
-		cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
-		mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+	if (port->has_tx_irqs) {
+		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+		if (cause_tx) {
+			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+		}
 	}
 
 	/* Process RX packets */

@@ -1286,20 +1286,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
 	mutex_unlock(&mdev->state_lock);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mlx4_en_netpoll(struct net_device *dev)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_cq *cq;
-	int i;
-
-	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
-		cq = priv->tx_cq[TX][i];
-		napi_schedule(&cq->napi);
-	}
-}
-#endif
-
 static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
 {
 	u64 reg_id;
@@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_tx_timeout		= mlx4_en_tx_timeout,
 	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= mlx4_en_netpoll,
-#endif
 	.ndo_set_features	= mlx4_en_set_features,
 	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= __mlx4_en_setup_tc,
@@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
 	.ndo_get_vf_stats       = mlx4_en_get_vf_stats,
 	.ndo_get_vf_config	= mlx4_en_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= mlx4_en_netpoll,
-#endif
 	.ndo_set_features	= mlx4_en_set_features,
 	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= __mlx4_en_setup_tc,

@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
 	struct mlx4_dev *dev = &priv->dev;
 	struct mlx4_eq *eq = &priv->eq_table.eq[vec];
 
-	if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+	if (!cpumask_available(eq->affinity_mask) ||
+	    cpumask_empty(eq->affinity_mask))
 		return;
 
 	hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);

@@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
 	u8 own;
 
 	do {
-		own = ent->lay->status_own;
+		own = READ_ONCE(ent->lay->status_own);
 		if (!(own & CMD_OWNER_HW)) {
 			ent->ret = 0;
 			return;

@@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = {
 
 void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
 {
-	u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
 	struct net_device *netdev = priv->netdev;
+	u32 caps;
 
 	if (!mlx5_accel_is_tls_device(priv->mdev))
 		return;
 
+	caps = mlx5_accel_tls_device_caps(priv->mdev);
 	if (caps & MLX5_ACCEL_TLS_TX) {
 		netdev->features          |= NETIF_F_HW_TLS_TX;
 		netdev->hw_features       |= NETIF_F_HW_TLS_TX;

@@ -4315,22 +4315,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
- * reenabling interrupts.
- */
-static void mlx5e_netpoll(struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_channels *chs = &priv->channels;
-
-	int i;
-
-	for (i = 0; i < chs->num; i++)
-		napi_schedule(&chs->c[i]->napi);
-}
-#endif
-
 static const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_open                = mlx5e_open,
 	.ndo_stop                = mlx5e_close,
@@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = {
 #ifdef CONFIG_MLX5_EN_ARFS
 	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller     = mlx5e_netpoll,
-#endif
 #ifdef CONFIG_MLX5_ESWITCH
 	/* SRIOV E-Switch NDOs */
 	.ndo_set_vf_mac          = mlx5e_set_vf_mac,

@@ -509,7 +509,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
 
 	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
 
-	if (next_state == MLX5_RQC_STATE_RDY) {
+	if (next_state == MLX5_SQC_STATE_RDY) {
 		MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
 		MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
 	}

@@ -44,8 +44,8 @@
 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
 
 #define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1702
-#define MLXSW_SP1_FWREV_SUBMINOR 6
+#define MLXSW_SP1_FWREV_MINOR 1703
+#define MLXSW_SP1_FWREV_SUBMINOR 4
 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
 
 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {

@@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
 		struct sk_buff *skb;
 		struct net_device *dev;
 		u32 *buf;
-		int sz, len;
+		int sz, len, buf_len;
 		u32 ifh[4];
 		u32 val;
 		struct frame_info info;
@@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
 			err = -ENOMEM;
 			break;
 		}
-		buf = (u32 *)skb_put(skb, info.len);
+		buf_len = info.len - ETH_FCS_LEN;
+		buf = (u32 *)skb_put(skb, buf_len);
 
 		len = 0;
 		do {
 			sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
 			*buf++ = val;
 			len += sz;
-		} while ((sz == 4) && (len < info.len));
+		} while (len < buf_len);
+
+		/* Read the FCS and discard it */
+		sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+		/* Update the statistics if part of the FCS was read before */
+		len -= ETH_FCS_LEN - sz;
 
 		if (sz < 0) {
 			err = sz;

@@ -3146,21 +3146,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void nfp_net_netpoll(struct net_device *netdev)
-{
-	struct nfp_net *nn = netdev_priv(netdev);
-	int i;
-
-	/* nfp_net's NAPIs are statically allocated so even if there is a race
-	 * with reconfig path this will simply try to schedule some disabled
-	 * NAPI instances.
-	 */
-	for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
-		napi_schedule_irqoff(&nn->r_vecs[i].napi);
-}
-#endif
-
 static void nfp_net_stat64(struct net_device *netdev,
 			   struct rtnl_link_stats64 *stats)
 {
@@ -3519,9 +3504,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
 	.ndo_get_stats64	= nfp_net_stat64,
 	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= nfp_net_netpoll,
-#endif
 	.ndo_set_vf_mac         = nfp_app_set_vf_mac,
 	.ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
 	.ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,

@@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
 
 static void
 qed_dcbx_set_params(struct qed_dcbx_results *p_data,
-		    struct qed_hw_info *p_info,
-		    bool enable,
-		    u8 prio,
-		    u8 tc,
+		    struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		    bool enable, u8 prio, u8 tc,
 		    enum dcbx_protocol_type type,
 		    enum qed_pci_personality personality)
 {
@@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
 	else
 		p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
 
+	/* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
+	if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
+	     test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
+		p_data->arr[type].dont_add_vlan0 = true;
+
 	/* QM reconf data */
-	if (p_info->personality == personality)
-		qed_hw_info_set_offload_tc(p_info, tc);
+	if (p_hwfn->hw_info.personality == personality)
+		qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
+
+	/* Configure dcbx vlan priority in doorbell block for roce EDPM */
+	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+	    type == DCBX_PROTOCOL_ROCE) {
+		qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+		qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
+	}
 }
 
 /* Update app protocol data and hw_info fields with the TLV info */
 static void
 qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
-			 struct qed_hwfn *p_hwfn,
-			 bool enable,
-			 u8 prio, u8 tc, enum dcbx_protocol_type type)
+			 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			 bool enable, u8 prio, u8 tc,
+			 enum dcbx_protocol_type type)
 {
-	struct qed_hw_info *p_info = &p_hwfn->hw_info;
 	enum qed_pci_personality personality;
 	enum dcbx_protocol_type id;
 	int i;
@@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 
 		personality = qed_dcbx_app_update[i].personality;
 
-		qed_dcbx_set_params(p_data, p_info, enable,
+		qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
 				    prio, tc, type, personality);
 	}
 }
@@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
  * reconfiguring QM. Get protocol specific data for PF update ramrod command.
  */
 static int
-qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		     struct qed_dcbx_results *p_data,
 		     struct dcbx_app_priority_entry *p_tbl,
 		     u32 pri_tc_tbl, int count, u8 dcbx_version)
@@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 			enable = true;
 		}
 
-		qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+		qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
 					 priority, tc, type);
 	}
 }
@@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 			continue;
 
 		enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
-		qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+		qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
 					 priority, tc, type);
 	}
 
@@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 /* Parse app TLV's to update TC information in hw_info structure for
  * reconfiguring QM. Get protocol specific data for PF update ramrod command.
  */
-static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+static int
+qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	struct dcbx_app_priority_feature *p_app;
 	struct dcbx_app_priority_entry *p_tbl;
@@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
 	p_info = &p_hwfn->hw_info;
 	num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
 
-	rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+	rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
 				  num_entries, dcbx_version);
 	if (rc)
 		return rc;
@@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
 		return rc;
 
 	if (type == QED_DCBX_OPERATIONAL_MIB) {
-		rc = qed_dcbx_process_mib_info(p_hwfn);
+		rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt);
 		if (!rc) {
 			/* reconfigure tcs of QM queues according
 			 * to negotiation results
@@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
 	p_data->dcb_enable_flag = p_src->arr[type].enable;
 	p_data->dcb_priority = p_src->arr[type].priority;
 	p_data->dcb_tc = p_src->arr[type].tc;
+	p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
 }
 
 /* Set pf update ramrod command params */

@@ -55,6 +55,7 @@ struct qed_dcbx_app_data {
 	u8 update;		/* Update indication */
 	u8 priority;		/* Priority */
 	u8 tc;			/* Traffic Class */
+	bool dont_add_vlan0;	/* Do not insert a vlan tag with id 0 */
 };
 
 #define QED_DCBX_VERSION_DISABLED       0

@@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 {
 	struct qed_load_req_params load_req_params;
-	u32 load_code, param, drv_mb_param;
+	u32 load_code, resp, param, drv_mb_param;
 	bool b_default_mtu = true;
 	struct qed_hwfn *p_hwfn;
 	int rc = 0, mfw_rc, i;
@@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 	if (IS_PF(cdev)) {
 		p_hwfn = QED_LEADING_HWFN(cdev);
 
+		/* Get pre-negotiated values for stag, bandwidth etc. */
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_SPQ,
+			   "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+		drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
+		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+				 DRV_MSG_CODE_GET_OEM_UPDATES,
+				 drv_mb_param, &resp, &param);
+		if (rc)
+			DP_NOTICE(p_hwfn,
+				  "Failed to send GET_OEM_UPDATES attention request\n");
+
 		drv_mb_param = STORM_FW_VERSION;
 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
 				 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,

@@ -12414,6 +12414,7 @@ struct public_drv_mb {
 #define DRV_MSG_SET_RESOURCE_VALUE_MSG		0x35000000
 #define DRV_MSG_CODE_OV_UPDATE_WOL		0x38000000
 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE	0x39000000
+#define DRV_MSG_CODE_GET_OEM_UPDATES		0x41000000
 
 #define DRV_MSG_CODE_BW_UPDATE_ACK		0x32000000
 #define DRV_MSG_CODE_NIG_DRAIN			0x30000000
@@ -12541,6 +12542,9 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_ESWITCH_MODE_VEB	0x1
 #define DRV_MB_PARAM_ESWITCH_MODE_VEPA	0x2
 
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK	0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET	0
+
 #define DRV_MB_PARAM_SET_LED_MODE_OPER		0x0
 #define DRV_MB_PARAM_SET_LED_MODE_ON		0x1
 #define DRV_MB_PARAM_SET_LED_MODE_OFF		0x2

@@ -1581,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
 					    FUNC_MF_CFG_OV_STAG_MASK;
 	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
-	if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
-	    (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
-		qed_wr(p_hwfn, p_ptt,
-		       NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
+
+	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
+		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
+			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+			       p_hwfn->hw_info.ovlan);
+			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+			/* Configure DB to add external vlan to EDPM packets */
+			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+			       p_hwfn->hw_info.ovlan);
+		} else {
+			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+		}
+
 		qed_sp_pf_update_stag(p_hwfn);
 	}
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
 		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
 
 	/* Acknowledge the MFW */
 	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
 		    &resp, &param);

@@ -216,6 +216,12 @@
 	0x00c000UL
 #define DORQ_REG_IFEN \
 	0x100040UL
+#define DORQ_REG_TAG1_OVRD_MODE \
+	0x1008b4UL
+#define DORQ_REG_PF_PCP_BB_K2 \
+	0x1008c4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 \
+	0x1008c8UL
 #define DORQ_REG_DB_DROP_REASON \
 	0x100a2cUL
 #define DORQ_REG_DB_DROP_DETAILS \

@@ -4071,6 +4071,15 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 	phy_speed_up(dev->phydev);
 
 	genphy_soft_reset(dev->phydev);
+
+	/* It was reported that chip version 33 ends up with 10MBit/Half on a
+	 * 1GBit link after resuming from S3. For whatever reason the PHY on
+	 * this chip doesn't properly start a renegotiation when soft-reset.
+	 * Explicitly requesting a renegotiation fixes this.
+	 */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
+	    dev->phydev->autoneg == AUTONEG_ENABLE)
+		phy_restart_aneg(dev->phydev);
 }
 
 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)

@@ -428,6 +428,7 @@ enum EIS_BIT {
 	EIS_CULF1	= 0x00000080,
 	EIS_TFFF	= 0x00000100,
 	EIS_QFS		= 0x00010000,
+	EIS_RESERVED	= (GENMASK(31, 17) | GENMASK(15, 11)),
 };
 
 /* RIC0 */
@@ -472,6 +473,7 @@ enum RIS0_BIT {
 	RIS0_FRF15	= 0x00008000,
 	RIS0_FRF16	= 0x00010000,
 	RIS0_FRF17	= 0x00020000,
+	RIS0_RESERVED	= GENMASK(31, 18),
 };
 
 /* RIC1 */
@@ -528,6 +530,7 @@ enum RIS2_BIT {
 	RIS2_QFF16	= 0x00010000,
 	RIS2_QFF17	= 0x00020000,
 	RIS2_RFFF	= 0x80000000,
+	RIS2_RESERVED	= GENMASK(30, 18),
 };
 
 /* TIC */
@@ -544,6 +547,7 @@ enum TIS_BIT {
 	TIS_FTF1	= 0x00000002,	/* Undocumented? */
 	TIS_TFUF	= 0x00000100,
 	TIS_TFWF	= 0x00000200,
+	TIS_RESERVED	= (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
 };
 
 /* ISS */
@@ -617,6 +621,7 @@ enum GIC_BIT {
 enum GIS_BIT {
 	GIS_PTCF	= 0x00000001,	/* Undocumented? */
 	GIS_PTMF	= 0x00000004,
+	GIS_RESERVED	= GENMASK(15, 10),
 };
 
 /* GIE (R-Car Gen3 only) */

@@ -739,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
 	u32 eis, ris2;
 
 	eis = ravb_read(ndev, EIS);
-	ravb_write(ndev, ~EIS_QFS, EIS);
+	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
 	if (eis & EIS_QFS) {
 		ris2 = ravb_read(ndev, RIS2);
-		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+			   RIS2);
 
 		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF0)
@@ -795,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
 	u32 tis = ravb_read(ndev, TIS);
 
 	if (tis & TIS_TFUF) {
-		ravb_write(ndev, ~TIS_TFUF, TIS);
+		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
 		ravb_get_tx_tstamp(ndev);
 		return true;
 	}
@@ -930,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 	/* Processing RX Descriptor Ring */
 	if (ris0 & mask) {
 		/* Clear RX interrupt */
-		ravb_write(ndev, ~mask, RIS0);
+		ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
 		if (ravb_rx(ndev, &quota, q))
 			goto out;
 	}
@@ -938,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 	if (tis & mask) {
 		spin_lock_irqsave(&priv->lock, flags);
 		/* Clear TX interrupt */
-		ravb_write(ndev, ~mask, TIS);
+		ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
 		ravb_tx_free(ndev, q, true);
 		netif_wake_subqueue(ndev, q);
 		mmiowb();

@@ -315,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
 		}
 	}
 
-	ravb_write(ndev, ~gis, GIS);
+	ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
 }
 
 void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)

@@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev);
 static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
 static void ether3_tx(struct net_device *dev);
 static int ether3_open (struct net_device *dev);
-static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether3_sendpacket(struct sk_buff *skb,
+				     struct net_device *dev);
 static irqreturn_t ether3_interrupt (int irq, void *dev_id);
 static int ether3_close (struct net_device *dev);
 static void ether3_setmulticastlist (struct net_device *dev);
@@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev)
 /*
  * Transmit a packet
  */
-static int
+static netdev_tx_t
 ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
 {
 	unsigned long flags;

@@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev)
 	return 0;
 }
 
-static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct sgiseeq_private *sp = netdev_priv(dev);
 	struct hpc3_ethregs *hregs = sp->hregs;

@@ -99,7 +99,7 @@ struct ioc3_private {
 
 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void ioc3_set_multicast_list(struct net_device *dev);
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void ioc3_timeout(struct net_device *dev);
 static inline unsigned int ioc3_hash(const unsigned char *addr);
 static inline void ioc3_stop(struct ioc3_private *ip);
@@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = {
 	.remove		= ioc3_remove_one,
 };
 
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	unsigned long data;
 	struct ioc3_private *ip = netdev_priv(dev);

@@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
 /*
  * Transmit a packet (called by the kernel)
  */
-static int meth_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct meth_private *priv = netdev_priv(dev);
 	unsigned long flags;

@@ -258,10 +258,10 @@ struct stmmac_safety_stats {
 #define MAX_DMA_RIWT		0xff
 #define MIN_DMA_RIWT		0x20
 /* Tx coalesce parameters */
-#define STMMAC_COAL_TX_TIMER	40000
+#define STMMAC_COAL_TX_TIMER	1000
 #define STMMAC_MAX_COAL_TX_TICK	100000
 #define STMMAC_TX_MAX_FRAMES	256
-#define STMMAC_TX_FRAMES	64
+#define STMMAC_TX_FRAMES	25
 
 /* Packets types */
 enum packets_types {

@@ -48,6 +48,8 @@ struct stmmac_tx_info {
 
 /* Frequently used values are kept adjacent for cache effect */
 struct stmmac_tx_queue {
+	u32 tx_count_frames;
+	struct timer_list txtimer;
 	u32 queue_index;
 	struct stmmac_priv *priv_data;
 	struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -73,7 +75,14 @@ struct stmmac_rx_queue {
 	u32 rx_zeroc_thresh;
 	dma_addr_t dma_rx_phy;
 	u32 rx_tail_addr;
-	struct napi_struct napi ____cacheline_aligned_in_smp;
+};
+
+struct stmmac_channel {
+	struct napi_struct napi ____cacheline_aligned_in_smp;
+	struct stmmac_priv *priv_data;
+	u32 index;
+	int has_rx;
+	int has_tx;
 };
 
 struct stmmac_tc_entry {
@@ -109,14 +118,12 @@ struct stmmac_pps_cfg {
 
 struct stmmac_priv {
 	/* Frequently used values are kept adjacent for cache effect */
-	u32 tx_count_frames;
 	u32 tx_coal_frames;
 	u32 tx_coal_timer;
 
 	int tx_coalesce;
 	int hwts_tx_en;
 	bool tx_path_in_lpi_mode;
-	struct timer_list txtimer;
 	bool tso;
 
 	unsigned int dma_buf_sz;
@@ -137,6 +144,9 @@ struct stmmac_priv {
 	/* TX Queue */
 	struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
 
+	/* Generic channel for NAPI */
+	struct stmmac_channel channel[STMMAC_CH_MAX];
+
 	bool oldlink;
 	int speed;
 	int oldduplex;

--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -148,12 +148,14 @@ static void stmmac_verify_args(void)
 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 {
 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
 	u32 queue;
 
-	for (queue = 0; queue < rx_queues_cnt; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
 
-		napi_disable(&rx_q->napi);
+		napi_disable(&ch->napi);
 	}
 }
 
@@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 {
 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
 	u32 queue;
 
-	for (queue = 0; queue < rx_queues_cnt; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
 
-		napi_enable(&rx_q->napi);
+		napi_enable(&ch->napi);
 	}
 }
 
@@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
-static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 {
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 	unsigned int bytes_compl = 0, pkts_compl = 0;
-	unsigned int entry;
+	unsigned int entry, count = 0;
 
-	netif_tx_lock(priv->dev);
+	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
 
 	priv->xstats.tx_clean++;
 
 	entry = tx_q->dirty_tx;
-	while (entry != tx_q->cur_tx) {
+	while ((entry != tx_q->cur_tx) && (count < budget)) {
 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
 		struct dma_desc *p;
 		int status;
@@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 		if (unlikely(status & tx_dma_own))
 			break;
 
+		count++;
+
 		/* Make sure descriptor fields are read after reading
 		 * the own bit.
 		 */
@@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 		stmmac_enable_eee_mode(priv);
 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 	}
-	netif_tx_unlock(priv->dev);
+
+	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+	return count;
 }
 
 /**
@@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
 	return false;
 }
 
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
+{
+	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+						 &priv->xstats, chan);
+	struct stmmac_channel *ch = &priv->channel[chan];
+	bool needs_work = false;
+
+	if ((status & handle_rx) && ch->has_rx) {
+		needs_work = true;
+	} else {
+		status &= ~handle_rx;
+	}
+
+	if ((status & handle_tx) && ch->has_tx) {
+		needs_work = true;
+	} else {
+		status &= ~handle_tx;
+	}
+
+	if (needs_work && napi_schedule_prep(&ch->napi)) {
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+		__napi_schedule(&ch->napi);
+	}
+
+	return status;
+}
+
 /**
  * stmmac_dma_interrupt - DMA ISR
  * @priv: driver private structure
@@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
 				tx_channel_count : rx_channel_count;
 	u32 chan;
-	bool poll_scheduled = false;
 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
 
 	/* Make sure we never check beyond our status buffer. */
 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
 		channels_to_check = ARRAY_SIZE(status);
 
-	/* Each DMA channel can be used for rx and tx simultaneously, yet
-	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
-	 * stmmac_channel struct.
-	 * Because of this, stmmac_poll currently checks (and possibly wakes)
-	 * all tx queues rather than just a single tx queue.
-	 */
 	for (chan = 0; chan < channels_to_check; chan++)
-		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
-				&priv->xstats, chan);
-
-	for (chan = 0; chan < rx_channel_count; chan++) {
-		if (likely(status[chan] & handle_rx)) {
-			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
-
-			if (likely(napi_schedule_prep(&rx_q->napi))) {
-				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
-				__napi_schedule(&rx_q->napi);
-				poll_scheduled = true;
-			}
-		}
-	}
-
-	/* If we scheduled poll, we already know that tx queues will be checked.
-	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
-	 * completed transmission, if so, call stmmac_poll (once).
-	 */
-	if (!poll_scheduled) {
-		for (chan = 0; chan < tx_channel_count; chan++) {
-			if (status[chan] & handle_tx) {
-				/* It doesn't matter what rx queue we choose
-				 * here. We use 0 since it always exists.
-				 */
-				struct stmmac_rx_queue *rx_q =
-					&priv->rx_queue[0];
-
-				if (likely(napi_schedule_prep(&rx_q->napi))) {
-					stmmac_disable_dma_irq(priv,
-							priv->ioaddr, chan);
-					__napi_schedule(&rx_q->napi);
-				}
-				break;
-			}
-		}
-	}
+		status[chan] = stmmac_napi_check(priv, chan);
 
 	for (chan = 0; chan < tx_channel_count; chan++) {
 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
 				    tx_q->dma_tx_phy, chan);
 
-		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
-			    (DMA_TX_SIZE * sizeof(struct dma_desc));
+		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
 				       tx_q->tx_tail_addr, chan);
 	}
@@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 	return ret;
 }
 
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
+}
+
 /**
  * stmmac_tx_timer - mitigation sw timer for tx.
  * @data: data pointer
@@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
  */
 static void stmmac_tx_timer(struct timer_list *t)
 {
-	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
-	u32 tx_queues_count = priv->plat->tx_queues_to_use;
-	u32 queue;
+	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
+	struct stmmac_priv *priv = tx_q->priv_data;
+	struct stmmac_channel *ch;
 
-	/* let's scan all the tx queues */
-	for (queue = 0; queue < tx_queues_count; queue++)
-		stmmac_tx_clean(priv, queue);
+	ch = &priv->channel[tx_q->queue_index];
+
+	if (likely(napi_schedule_prep(&ch->napi)))
+		__napi_schedule(&ch->napi);
 }
 
 /**
@@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t)
  */
 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 {
+	u32 tx_channel_count = priv->plat->tx_queues_to_use;
+	u32 chan;
+
 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
-	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
-	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
-	add_timer(&priv->txtimer);
+
+	for (chan = 0; chan < tx_channel_count; chan++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
+	}
 }
 
 static void stmmac_set_rings_length(struct stmmac_priv *priv)
@@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
 static int stmmac_open(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 chan;
 	int ret;
 
 	stmmac_check_ether_addr(priv);
@@ -2688,7 +2695,9 @@ static int stmmac_open(struct net_device *dev)
 	if (dev->phydev)
 		phy_stop(dev->phydev);
 
-	del_timer_sync(&priv->txtimer);
+	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+		del_timer_sync(&priv->tx_queue[chan].txtimer);
+
 	stmmac_hw_teardown(dev);
 init_error:
 	free_dma_desc_resources(priv);
@@ -2708,6 +2717,7 @@ static int stmmac_open(struct net_device *dev)
 static int stmmac_release(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 chan;
 
 	if (priv->eee_enabled)
 		del_timer_sync(&priv->eee_ctrl_timer);
@@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev)
 
 	stmmac_disable_all_queues(priv);
 
-	del_timer_sync(&priv->txtimer);
+	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+		del_timer_sync(&priv->tx_queue[chan].txtimer);
 
 	/* Free the IRQ lines */
 	free_irq(dev->irq, dev);
@@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->xstats.tx_tso_nfrags += nfrags;
 
 	/* Manage tx mitigation */
-	priv->tx_count_frames += nfrags + 1;
-	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
-		mod_timer(&priv->txtimer,
-			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
-	} else {
-		priv->tx_count_frames = 0;
+	tx_q->tx_count_frames += nfrags + 1;
+	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
 		stmmac_set_tx_ic(priv, desc);
 		priv->xstats.tx_set_ic_bit++;
+		tx_q->tx_count_frames = 0;
+	} else {
+		stmmac_tx_timer_arm(priv, queue);
 	}
 
 	skb_tx_timestamp(skb);
@@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
+	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
 	return NETDEV_TX_OK;
@@ -3146,14 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * This approach takes care about the fragments: desc is the first
 	 * element in case of no SG.
 	 */
-	priv->tx_count_frames += nfrags + 1;
-	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
-		mod_timer(&priv->txtimer,
-			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
-	} else {
-		priv->tx_count_frames = 0;
+	tx_q->tx_count_frames += nfrags + 1;
+	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
 		stmmac_set_tx_ic(priv, desc);
 		priv->xstats.tx_set_ic_bit++;
+		tx_q->tx_count_frames = 0;
+	} else {
+		stmmac_tx_timer_arm(priv, queue);
 	}
 
 	skb_tx_timestamp(skb);
@@ -3199,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
 	return NETDEV_TX_OK;
@@ -3319,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_channel *ch = &priv->channel[queue];
 	unsigned int entry = rx_q->cur_rx;
 	int coe = priv->hw->rx_csum;
 	unsigned int next_entry;
@@ -3491,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			else
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-			napi_gro_receive(&rx_q->napi, skb);
+			napi_gro_receive(&ch->napi, skb);
 
 			priv->dev->stats.rx_packets++;
 			priv->dev->stats.rx_bytes += frame_len;
@@ -3514,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
  * Description :
  * To look at the incoming frames and clear the tx resources.
  */
-static int stmmac_poll(struct napi_struct *napi, int budget)
+static int stmmac_napi_poll(struct napi_struct *napi, int budget)
 {
-	struct stmmac_rx_queue *rx_q =
-		container_of(napi, struct stmmac_rx_queue, napi);
-	struct stmmac_priv *priv = rx_q->priv_data;
-	u32 tx_count = priv->plat->tx_queues_to_use;
-	u32 chan = rx_q->queue_index;
-	int work_done = 0;
-	u32 queue;
+	struct stmmac_channel *ch =
+		container_of(napi, struct stmmac_channel, napi);
+	struct stmmac_priv *priv = ch->priv_data;
+	int work_done = 0, work_rem = budget;
+	u32 chan = ch->index;
 
 	priv->xstats.napi_poll++;
 
-	/* check all the queues */
-	for (queue = 0; queue < tx_count; queue++)
-		stmmac_tx_clean(priv, queue);
+	if (ch->has_tx) {
+		int done = stmmac_tx_clean(priv, work_rem, chan);
 
-	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
-	if (work_done < budget) {
-		napi_complete_done(napi, work_done);
-		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+		work_done += done;
+		work_rem -= done;
 	}
+
+	if (ch->has_rx) {
+		int done = stmmac_rx(priv, work_rem, chan);
+
+		work_done += done;
+		work_rem -= done;
+	}
+
+	if (work_done < budget && napi_complete_done(napi, work_done))
+		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+
 	return work_done;
 }
 
@@ -4198,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device,
 {
 	struct net_device *ndev = NULL;
 	struct stmmac_priv *priv;
+	u32 queue, maxq;
 	int ret = 0;
-	u32 queue;
 
 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
 				  MTL_MAX_TX_QUEUES,
@@ -4322,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device,
 			 "Enable RX Mitigation via HW Watchdog Timer\n");
 	}
 
-	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	/* Setup channels NAPI */
+	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
 
-		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
-			       (8 * priv->plat->rx_queues_to_use));
-	}
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
+
+		ch->priv_data = priv;
+		ch->index = queue;
+
+		if (queue < priv->plat->rx_queues_to_use)
+			ch->has_rx = true;
+		if (queue < priv->plat->tx_queues_to_use)
+			ch->has_tx = true;
+
+		netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
+			       NAPI_POLL_WEIGHT);
+	}
 
 	mutex_init(&priv->lock);
@@ -4372,10 +4402,10 @@ int stmmac_dvr_probe(struct device *device,
 	    priv->hw->pcs != STMMAC_PCS_RTBI)
 		stmmac_mdio_unregister(ndev);
 error_mdio_register:
-	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
 
-		netif_napi_del(&rx_q->napi);
+		netif_napi_del(&ch->napi);
 	}
 error_hw_init:
 	destroy_workqueue(priv->wq);

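Note: the stmmac rework above replaces the single per-device mitigation timer with one timer per TX queue and one NAPI context per DMA channel, so traffic on one queue can no longer re-arm the shared timer and starve completion processing on another. A minimal userspace model of the per-queue decision (invented names, plain C; a sketch of the idea, not the driver API):

    #include <stdio.h>

    #define NQUEUES     4
    #define COAL_FRAMES 8

    /* Each queue owns its frame counter and (modeled) timer, mirroring the
     * move of tx_count_frames and txtimer into the per-queue struct. */
    struct txq {
        unsigned int count_frames;
        int timer_armed;
    };

    static struct txq queues[NQUEUES];

    /* On transmit: either request a completion interrupt for this batch or
     * arm THIS queue's timer; no queue ever touches another queue's timer. */
    static void tx_mitigate(struct txq *q, unsigned int nfrags)
    {
        q->count_frames += nfrags + 1;
        if (q->count_frames >= COAL_FRAMES) {
            printf("queue %ld: set IC bit, IRQ will fire\n",
                   (long)(q - queues));
            q->count_frames = 0;
        } else {
            q->timer_armed = 1; /* mod_timer() in the real driver */
        }
    }

    int main(void)
    {
        for (int i = 0; i < 12; i++)
            tx_mitigate(&queues[i % NQUEUES], 2);
        return 0;
    }
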
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work)
 		w5100_tx_skb(priv->ndev, skb);
 }
 
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct w5100_priv *priv = netdev_priv(ndev);
 
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
 	netif_wake_queue(ndev);
 }
 
-static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct w5300_priv *priv = netdev_priv(ndev);
 
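Note: both wiznet hunks above are prototype corrections — .ndo_start_xmit implementations must return netdev_tx_t (NETDEV_TX_OK or NETDEV_TX_BUSY) rather than int, so a mismatch is caught where the function pointer is assigned. A userspace model of why a dedicated return type helps (illustrative, invented names):

    #include <stdio.h>

    /* A dedicated return type keeps transmit hooks from silently
     * returning negative errno values through an int. */
    typedef enum { MY_TX_OK = 0, MY_TX_BUSY = 0x10 } my_tx_t;

    struct my_ops {
        my_tx_t (*start_xmit)(const char *frame);
    };

    static my_tx_t demo_start_xmit(const char *frame)
    {
        printf("sending %s\n", frame);
        return MY_TX_OK; /* never an errno */
    }

    static const struct my_ops ops = { .start_xmit = demo_start_xmit };

    int main(void)
    {
        return ops.start_xmit("hello") == MY_TX_OK ? 0 : 1;
    }
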
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
 	}
 	if (bus->started)
 		bus->socket_ops->start(bus->sfp);
+	bus->netdev->sfp_bus = bus;
 	bus->registered = true;
 	return 0;
 }
@@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
 {
 	const struct sfp_upstream_ops *ops = bus->upstream_ops;
 
+	bus->netdev->sfp_bus = NULL;
 	if (bus->registered) {
 		if (bus->started)
 			bus->socket_ops->stop(bus->sfp);
@@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
 {
 	bus->upstream_ops = NULL;
 	bus->upstream = NULL;
-	bus->netdev->sfp_bus = NULL;
 	bus->netdev = NULL;
 }
 
@@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
 		bus->upstream_ops = ops;
 		bus->upstream = upstream;
 		bus->netdev = ndev;
-		ndev->sfp_bus = bus;
 
 		if (bus->sfp) {
 			ret = sfp_register_bus(bus);
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1153,43 +1153,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev,
 
 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 }
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void tun_poll_controller(struct net_device *dev)
-{
-	/*
-	 * Tun only receives frames when:
-	 * 1) the char device endpoint gets data from user space
-	 * 2) the tun socket gets a sendmsg call from user space
-	 * If NAPI is not enabled, since both of those are synchronous
-	 * operations, we are guaranteed never to have pending data when we poll
-	 * for it so there is nothing to do here but return.
-	 * We need this though so netpoll recognizes us as an interface that
-	 * supports polling, which enables bridge devices in virt setups to
-	 * still use netconsole
-	 * If NAPI is enabled, however, we need to schedule polling for all
-	 * queues unless we are using napi_gro_frags(), which we call in
-	 * process context and not in NAPI context.
-	 */
-	struct tun_struct *tun = netdev_priv(dev);
-
-	if (tun->flags & IFF_NAPI) {
-		struct tun_file *tfile;
-		int i;
-
-		if (tun_napi_frags_enabled(tun))
-			return;
-
-		rcu_read_lock();
-		for (i = 0; i < tun->numqueues; i++) {
-			tfile = rcu_dereference(tun->tfiles[i]);
-			if (tfile->napi_enabled)
-				napi_schedule(&tfile->napi);
-		}
-		rcu_read_unlock();
-	}
-	return;
-}
-#endif
 
 static void tun_set_headroom(struct net_device *dev, int new_hr)
 {
@@ -1283,9 +1246,6 @@ static const struct net_device_ops tun_netdev_ops = {
 	.ndo_start_xmit		= tun_net_xmit,
 	.ndo_fix_features	= tun_net_fix_features,
 	.ndo_select_queue	= tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= tun_poll_controller,
-#endif
 	.ndo_set_rx_headroom	= tun_set_headroom,
 	.ndo_get_stats64	= tun_net_get_stats64,
 };
@@ -1365,9 +1325,6 @@ static const struct net_device_ops tap_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_select_queue	= tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= tun_poll_controller,
-#endif
 	.ndo_features_check	= passthru_features_check,
 	.ndo_set_rx_headroom	= tun_set_headroom,
 	.ndo_get_stats64	= tun_net_get_stats64,
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1484,8 +1484,10 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
 		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
 		hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
 					  name, NULL);
-		if (!hpdev->pci_slot)
+		if (IS_ERR(hpdev->pci_slot)) {
 			pr_warn("pci_create slot %s failed\n", name);
+			hpdev->pci_slot = NULL;
+		}
 	}
 }
 
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -49,8 +49,9 @@ struct netpoll_info {
 };
 
 #ifdef CONFIG_NETPOLL
-extern void netpoll_poll_disable(struct net_device *dev);
-extern void netpoll_poll_enable(struct net_device *dev);
+void netpoll_poll_dev(struct net_device *dev);
+void netpoll_poll_disable(struct net_device *dev);
+void netpoll_poll_enable(struct net_device *dev);
 #else
 static inline void netpoll_poll_disable(struct net_device *dev) { return; }
 static inline void netpoll_poll_enable(struct net_device *dev) { return; }
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -30,6 +30,7 @@
 
 #define MTL_MAX_RX_QUEUES	8
 #define MTL_MAX_TX_QUEUES	8
+#define STMMAC_CH_MAX		8
 
 #define STMMAC_RX_COE_NONE	0
 #define STMMAC_RX_COE_TYPE1	1
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NFC_HCI_MAX_PIPES		127
+#define NFC_HCI_MAX_PIPES		128
 struct nfc_hci_init_data {
 	u8 gate_count;
 	struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
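Note: this constant pairs with the bounds checks added to net/nfc/hci/core.c further below. A 7-bit pipe identifier ranges over 0..127, so the backing array needs 128 entries and the guard before indexing is `id >= NFC_HCI_MAX_PIPES`. Standalone illustration of the off-by-one (invented names):

    #include <stdio.h>

    #define MAX_PIPES 128 /* 7-bit id => 0..127, so 128 slots */

    static int pipes[MAX_PIPES];

    static int set_pipe(unsigned int id, int val)
    {
        if (id >= MAX_PIPES) /* rejects 128 and above, accepts 127 */
            return -1;
        pipes[id] = val;
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", set_pipe(127, 1), set_pipe(128, 1)); /* 0 -1 */
        return 0;
    }
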
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -132,6 +132,7 @@ struct smap_psock {
 	struct work_struct gc_work;
 
 	struct proto *sk_proto;
+	void (*save_unhash)(struct sock *sk);
 	void (*save_close)(struct sock *sk, long timeout);
 	void (*save_data_ready)(struct sock *sk);
 	void (*save_write_space)(struct sock *sk);
@@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
 			    int offset, size_t size, int flags);
+static void bpf_tcp_unhash(struct sock *sk);
 static void bpf_tcp_close(struct sock *sk, long timeout);
 
 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
@@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
 			 struct proto *base)
 {
 	prot[SOCKMAP_BASE]			= *base;
+	prot[SOCKMAP_BASE].unhash		= bpf_tcp_unhash;
 	prot[SOCKMAP_BASE].close		= bpf_tcp_close;
 	prot[SOCKMAP_BASE].recvmsg		= bpf_tcp_recvmsg;
 	prot[SOCKMAP_BASE].stream_memory_read	= bpf_tcp_stream_read;
@@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk)
 		return -EBUSY;
 	}
 
+	psock->save_unhash = sk->sk_prot->unhash;
 	psock->save_close = sk->sk_prot->close;
 	psock->sk_proto = sk->sk_prot;
 
@@ -305,30 +309,12 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
 	return e;
 }
 
-static void bpf_tcp_close(struct sock *sk, long timeout)
+static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock)
 {
-	void (*close_fun)(struct sock *sk, long timeout);
 	struct smap_psock_map_entry *e;
 	struct sk_msg_buff *md, *mtmp;
-	struct smap_psock *psock;
 	struct sock *osk;
 
-	lock_sock(sk);
-	rcu_read_lock();
-	psock = smap_psock_sk(sk);
-	if (unlikely(!psock)) {
-		rcu_read_unlock();
-		release_sock(sk);
-		return sk->sk_prot->close(sk, timeout);
-	}
-
-	/* The psock may be destroyed anytime after exiting the RCU critial
-	 * section so by the time we use close_fun the psock may no longer
-	 * be valid. However, bpf_tcp_close is called with the sock lock
-	 * held so the close hook and sk are still valid.
-	 */
-	close_fun = psock->save_close;
-
 	if (psock->cork) {
 		free_start_sg(psock->sock, psock->cork, true);
 		kfree(psock->cork);
@@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
 		kfree(e);
 		e = psock_map_pop(sk, psock);
 	}
+}
+
+static void bpf_tcp_unhash(struct sock *sk)
+{
+	void (*unhash_fun)(struct sock *sk);
+	struct smap_psock *psock;
+
+	rcu_read_lock();
+	psock = smap_psock_sk(sk);
+	if (unlikely(!psock)) {
+		rcu_read_unlock();
+		if (sk->sk_prot->unhash)
+			sk->sk_prot->unhash(sk);
+		return;
+	}
+	unhash_fun = psock->save_unhash;
+	bpf_tcp_remove(sk, psock);
+	rcu_read_unlock();
+	unhash_fun(sk);
+}
+
+static void bpf_tcp_close(struct sock *sk, long timeout)
+{
+	void (*close_fun)(struct sock *sk, long timeout);
+	struct smap_psock *psock;
+
+	lock_sock(sk);
+	rcu_read_lock();
+	psock = smap_psock_sk(sk);
+	if (unlikely(!psock)) {
+		rcu_read_unlock();
+		release_sock(sk);
+		return sk->sk_prot->close(sk, timeout);
+	}
+	close_fun = psock->save_close;
+	bpf_tcp_remove(sk, psock);
 	rcu_read_unlock();
 	release_sock(sk);
 	close_fun(sk, timeout);
@@ -2097,8 +2119,12 @@ static int sock_map_update_elem(struct bpf_map *map,
 		return -EINVAL;
 	}
 
+	/* ULPs are currently supported only for TCP sockets in ESTABLISHED
+	 * state.
+	 */
 	if (skops.sk->sk_type != SOCK_STREAM ||
-	    skops.sk->sk_protocol != IPPROTO_TCP) {
+	    skops.sk->sk_protocol != IPPROTO_TCP ||
+	    skops.sk->sk_state != TCP_ESTABLISHED) {
 		fput(socket->file);
 		return -EOPNOTSUPP;
 	}
@@ -2453,6 +2479,16 @@ static int sock_hash_update_elem(struct bpf_map *map,
 		return -EINVAL;
 	}
 
+	/* ULPs are currently supported only for TCP sockets in ESTABLISHED
+	 * state.
+	 */
+	if (skops.sk->sk_type != SOCK_STREAM ||
+	    skops.sk->sk_protocol != IPPROTO_TCP ||
+	    skops.sk->sk_state != TCP_ESTABLISHED) {
+		fput(socket->file);
+		return -EOPNOTSUPP;
+	}
+
 	lock_sock(skops.sk);
 	preempt_disable();
 	rcu_read_lock();
@@ -2543,10 +2579,22 @@ const struct bpf_map_ops sock_hash_ops = {
 	.map_check_btf = map_check_no_btf,
 };
 
+static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops)
+{
+	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
+	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
+}
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
 	   struct bpf_map *, map, void *, key, u64, flags)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
+	/* ULPs are currently supported only for TCP sockets in ESTABLISHED
+	 * state. This checks that the sock ops triggering the update is
+	 * one indicating we are (or will be soon) in an ESTABLISHED state.
+	 */
+	if (!bpf_is_valid_sock_op(bpf_sock))
+		return -EOPNOTSUPP;
 	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
 }
 
@@ -2565,6 +2613,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
 	   struct bpf_map *, map, void *, key, u64, flags)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	if (!bpf_is_valid_sock_op(bpf_sock))
+		return -EOPNOTSUPP;
 	return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
 }
 
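Note: the sockmap change above is an instance of the save-and-chain pattern for protocol callbacks — record the original handler when installing the override, run the private teardown, then invoke the saved handler so the socket still takes its native path. A userspace model of the pattern (invented names, not the kernel types):

    #include <stdio.h>

    struct proto { void (*unhash)(void); };

    static void tcp_unhash(void) { puts("native unhash"); }

    static struct proto tcp = { .unhash = tcp_unhash };
    static void (*save_unhash)(void);

    static void bpf_unhash(void)
    {
        puts("detach psock state"); /* bpf_tcp_remove() in the real code */
        save_unhash();              /* then chain to the saved native op */
    }

    int main(void)
    {
        save_unhash = tcp.unhash; /* save at attach time */
        tcp.unhash = bpf_unhash;  /* install the override */
        tcp.unhash();
        return 0;
    }
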
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
 	 * the packet to be exactly of that size to make the link
 	 * throughput estimation effective.
 	 */
-	skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+	skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Sending unicast (probe) ELP packet on interface %s to %pM\n",
@@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
 	struct batadv_priv *bat_priv;
 	struct sk_buff *skb;
 	u32 elp_interval;
+	bool ret;
 
 	bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
 	hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
 		 * may sleep and that is not allowed in an rcu protected
 		 * context. Therefore schedule a task for that.
 		 */
-		queue_work(batadv_event_workqueue,
-			   &hardif_neigh->bat_v.metric_work);
+		ret = queue_work(batadv_event_workqueue,
+				 &hardif_neigh->bat_v.metric_work);
+
+		if (!ret)
+			batadv_hardif_neigh_put(hardif_neigh);
 	}
 	rcu_read_unlock();
 
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	struct ethhdr *ethhdr;
+	bool ret;
 
 	ethhdr = eth_hdr(skb);
 
@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 	if (unlikely(!backbone_gw))
 		return true;
 
-	queue_work(batadv_event_workqueue, &backbone_gw->report_work);
-	/* backbone_gw is unreferenced in the report work function function */
+	ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+
+	/* backbone_gw is unreferenced in the report work function function
+	 * if queue_work() call was successful
+	 */
+	if (!ret)
+		batadv_backbone_gw_put(backbone_gw);
 
 	return true;
 }
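Note: both batman-adv hunks above repair the same reference leak — queue_work() returns false when the work item is already pending, and in that case the reference taken for the work callback is never consumed, so the caller must drop it itself. Standalone model of the rule (invented names):

    #include <stdbool.h>
    #include <stdio.h>

    struct obj { int refcount; };

    static void put(struct obj *o)
    {
        if (--o->refcount == 0)
            puts("freed");
    }

    /* Models queue_work(): false means "already queued"; the callback
     * will run once and consume only one reference. */
    static bool queue_work_once(bool already_pending)
    {
        return !already_pending;
    }

    static void schedule_report(struct obj *o, bool pending)
    {
        o->refcount++;                 /* ref owned by the work callback */
        if (!queue_work_once(pending))
            put(o);                    /* not queued: drop our ref here */
    }

    int main(void)
    {
        struct obj o = { .refcount = 1 };
        schedule_report(&o, true); /* enqueue attempt while still pending */
        put(&o);
        return 0;
    }
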
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -32,6 +32,7 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
@@ -348,6 +349,9 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  * @gateway: announced bandwidth information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (gw.list_lock).
  */
 static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
@@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 {
 	struct batadv_gw_node *gw_node;
 
+	lockdep_assert_held(&bat_priv->gw.list_lock);
+
 	if (gateway->bandwidth_down == 0)
 		return;
 
@@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
 	gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
 
-	spin_lock_bh(&bat_priv->gw.list_lock);
 	kref_get(&gw_node->refcount);
 	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
-	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 {
 	struct batadv_gw_node *gw_node, *curr_gw = NULL;
 
+	spin_lock_bh(&bat_priv->gw.list_lock);
 	gw_node = batadv_gw_node_get(bat_priv, orig_node);
 	if (!gw_node) {
 		batadv_gw_node_add(bat_priv, orig_node, gateway);
+		spin_unlock_bh(&bat_priv->gw.list_lock);
 		goto out;
 	}
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
 	    gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -25,7 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2018.2"
+#define BATADV_SOURCE_VERSION "2018.3"
 #endif
 
 /* B.A.T.M.A.N. parameters */
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -854,24 +854,6 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
 	spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
 	struct list_head *list;
 
-	/* Check if nc_node is already added */
-	nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
-
-	/* Node found */
-	if (nc_node)
-		return nc_node;
-
-	nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
-	if (!nc_node)
-		return NULL;
-
-	/* Initialize nc_node */
-	INIT_LIST_HEAD(&nc_node->list);
-	kref_init(&nc_node->refcount);
-	ether_addr_copy(nc_node->addr, orig_node->orig);
-	kref_get(&orig_neigh_node->refcount);
-	nc_node->orig_node = orig_neigh_node;
-
 	/* Select ingoing or outgoing coding node */
 	if (in_coding) {
 		lock = &orig_neigh_node->in_coding_list_lock;
@@ -881,13 +863,34 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
 		list = &orig_neigh_node->out_coding_list;
 	}
 
+	spin_lock_bh(lock);
+
+	/* Check if nc_node is already added */
+	nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
+
+	/* Node found */
+	if (nc_node)
+		goto unlock;
+
+	nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
+	if (!nc_node)
+		goto unlock;
+
+	/* Initialize nc_node */
+	INIT_LIST_HEAD(&nc_node->list);
+	kref_init(&nc_node->refcount);
+	ether_addr_copy(nc_node->addr, orig_node->orig);
+	kref_get(&orig_neigh_node->refcount);
+	nc_node->orig_node = orig_neigh_node;
+
 	batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
 		   nc_node->addr, nc_node->orig_node->orig);
 
 	/* Add nc_node to orig_node */
-	spin_lock_bh(lock);
 	kref_get(&nc_node->refcount);
 	list_add_tail_rcu(&nc_node->list, list);
+
+unlock:
 	spin_unlock_bh(lock);
 
 	return nc_node;
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 	struct batadv_softif_vlan *vlan;
 	int err;
 
+	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
 	if (vlan) {
 		batadv_softif_vlan_put(vlan);
+		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
 		return -EEXIST;
 	}
 
 	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
-	if (!vlan)
+	if (!vlan) {
+		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
 		return -ENOMEM;
+	}
 
 	vlan->bat_priv = bat_priv;
 	vlan->vid = vid;
@@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 
 	atomic_set(&vlan->ap_isolation, 0);
 
-	err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
-	if (err) {
-		kfree(vlan);
-		return err;
-	}
-
-	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
 	kref_get(&vlan->refcount);
 	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
 	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
 
+	/* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
+	 * sleeping behavior of the sysfs functions and the fs_reclaim lock
+	 */
+	err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+	if (err) {
+		/* ref for the function */
+		batadv_softif_vlan_put(vlan);
+
+		/* ref for the list */
+		batadv_softif_vlan_put(vlan);
+		return err;
+	}
+
 	/* add a new TT local entry. This one will be marked with the NOPURGE
 	 * flag
 	 */
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj,			\
 									\
 	return __batadv_store_uint_attr(buff, count, _min, _max,	\
 					_post_func, attr,		\
-					&bat_priv->_var, net_dev);	\
+					&bat_priv->_var, net_dev,	\
+					NULL);				\
 }
 
 #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var)				\
@@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj,			\
 									\
 	length = __batadv_store_uint_attr(buff, count, _min, _max,	\
 					  _post_func, attr,		\
-					  &hard_iface->_var, net_dev);	\
+					  &hard_iface->_var,		\
+					  hard_iface->soft_iface,	\
+					  net_dev);			\
 									\
 	batadv_hardif_put(hard_iface);					\
 	return length;							\
@@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
 
 static int batadv_store_uint_attr(const char *buff, size_t count,
 				  struct net_device *net_dev,
+				  struct net_device *slave_dev,
 				  const char *attr_name,
 				  unsigned int min, unsigned int max,
 				  atomic_t *attr)
 {
+	char ifname[IFNAMSIZ + 3] = "";
 	unsigned long uint_val;
 	int ret;
 
@@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
 	if (atomic_read(attr) == uint_val)
 		return count;
 
-	batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
-		    attr_name, atomic_read(attr), uint_val);
+	if (slave_dev)
+		snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
+
+	batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
+		    attr_name, ifname, atomic_read(attr), uint_val);
 
 	atomic_set(attr, uint_val);
 	return count;
@@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
 					void (*post_func)(struct net_device *),
 					const struct attribute *attr,
 					atomic_t *attr_store,
-					struct net_device *net_dev)
+					struct net_device *net_dev,
+					struct net_device *slave_dev)
 {
 	int ret;
 
-	ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
-				     attr_store);
+	ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
+				     attr->name, min, max, attr_store);
 	if (post_func && ret)
 		post_func(net_dev);
 
@@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
 	return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
 					batadv_post_gw_reselect, attr,
 					&bat_priv->gw.sel_class,
-					bat_priv->soft_iface);
+					bat_priv->soft_iface, NULL);
 }
 
 static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
 	if (old_tp_override == tp_override)
 		goto out;
 
-	batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
-		    "throughput_override",
+	batadv_info(hard_iface->soft_iface,
+		    "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
+		    "throughput_override", net_dev->name,
 		    old_tp_override / 10, old_tp_override % 10,
 		    tp_override / 10, tp_override % 10);
 
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 {
 	struct batadv_tt_orig_list_entry *orig_entry;
 
+	spin_lock_bh(&tt_global->list_lock);
+
 	orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
 	if (orig_entry) {
 		/* refresh the ttvn: the current value could be a bogus one that
@@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 	orig_entry->flags = flags;
 	kref_init(&orig_entry->refcount);
 
-	spin_lock_bh(&tt_global->list_lock);
 	kref_get(&orig_entry->refcount);
 	hlist_add_head_rcu(&orig_entry->list,
 			   &tt_global->orig_list);
-	spin_unlock_bh(&tt_global->list_lock);
 	atomic_inc(&tt_global->orig_list_count);
 
 sync_flags:
@@ -1647,6 +1647,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 out:
 	if (orig_entry)
 		batadv_tt_orig_list_entry_put(orig_entry);
+
+	spin_unlock_bh(&tt_global->list_lock);
 }
 
 /**
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 {
 	struct batadv_tvlv_handler *tvlv_handler;
 
+	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+
 	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
 	if (tvlv_handler) {
+		spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
 		batadv_tvlv_handler_put(tvlv_handler);
 		return;
 	}
 
 	tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
-	if (!tvlv_handler)
+	if (!tvlv_handler) {
+		spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
 		return;
+	}
 
 	tvlv_handler->ogm_handler = optr;
 	tvlv_handler->unicast_handler = uptr;
@@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 	kref_init(&tvlv_handler->refcount);
 	INIT_HLIST_NODE(&tvlv_handler->list);
 
-	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
 	kref_get(&tvlv_handler->refcount);
 	hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
 	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
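Note: the batman-adv hunks from gateway_client.c through tvlv.c above all close the same race — lookup and list insertion used to run in separate lock sections, so two concurrent callers could both miss an existing entry and insert duplicates. The fix widens the critical section to cover lookup, (atomic) allocation, and insertion. Schematic userspace version, with a pthread mutex standing in for the spinlock (invented names):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { int key; struct node *next; };

    static struct node *head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Lookup and insert share one critical section, so a concurrent
     * caller can no longer add a duplicate between the two steps. */
    static struct node *get_or_add(int key)
    {
        struct node *n;

        pthread_mutex_lock(&lock);
        for (n = head; n; n = n->next)
            if (n->key == key)
                goto unlock;

        n = calloc(1, sizeof(*n)); /* GFP_ATOMIC analogue: no sleeping */
        if (!n)
            goto unlock;
        n->key = key;
        n->next = head;
        head = n;
    unlock:
        pthread_mutex_unlock(&lock);
        return n;
    }

    int main(void) { return get_or_add(42) ? 0 : 1; }
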
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -2592,7 +2592,7 @@ static int devlink_resource_fill(struct genl_info *info,
 		if (!nlh) {
 			err = devlink_dpipe_send_and_alloc_skb(&skb, info);
 			if (err)
-				goto err_skb_send_alloc;
+				return err;
 			goto send_done;
 		}
 		return genlmsg_reply(skb, info);
@@ -2600,7 +2600,6 @@ static int devlink_resource_fill(struct genl_info *info,
 nla_put_failure:
 	err = -EMSGSIZE;
 err_resource_put:
-err_skb_send_alloc:
 	nlmsg_free(skb);
 	return err;
 }
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -2624,6 +2624,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_GPHYSTATS:
 	case ETHTOOL_GTSO:
 	case ETHTOOL_GPERMADDR:
+	case ETHTOOL_GUFO:
 	case ETHTOOL_GGSO:
 	case ETHTOOL_GGRO:
 	case ETHTOOL_GFLAGS:
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -187,16 +187,16 @@ static void poll_napi(struct net_device *dev)
 	}
 }
 
-static void netpoll_poll_dev(struct net_device *dev)
+void netpoll_poll_dev(struct net_device *dev)
 {
-	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+	const struct net_device_ops *ops;
 
 	/* Don't do any rx activity if the dev_lock mutex is held
 	 * the dev_open/close paths use this to block netpoll activity
 	 * while changing device state
 	 */
-	if (down_trylock(&ni->dev_lock))
+	if (!ni || down_trylock(&ni->dev_lock))
 		return;
 
 	if (!netif_running(dev)) {
@@ -205,13 +205,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 	}
 
 	ops = dev->netdev_ops;
-	if (!ops->ndo_poll_controller) {
-		up(&ni->dev_lock);
-		return;
-	}
-
-	/* Process pending work on NIC */
-	ops->ndo_poll_controller(dev);
+	if (ops->ndo_poll_controller)
+		ops->ndo_poll_controller(dev);
 
 	poll_napi(dev);
 
@@ -219,6 +214,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	zap_completion_queue();
 }
+EXPORT_SYMBOL(netpoll_poll_dev);
 
 void netpoll_poll_disable(struct net_device *dev)
 {
@@ -613,8 +609,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
 	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
-	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-	    !ndev->netdev_ops->ndo_poll_controller) {
+	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
 		np_err(np, "%s doesn't support polling, aborting\n",
 		       np->dev_name);
 		err = -ENOTSUPP;
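Note: with ndo_poll_controller now optional and netpoll_poll_dev() exported, a stacked driver (bonding is converted this way earlier in the series) no longer pokes npinfo and the lower device's ops directly; it calls the helper once per lower device, and the helper takes the dev_lock, runs the optional poll controller, and drains NAPI. A userspace model of the control flow (invented names; the real helper also zaps the completion queue):

    #include <stdbool.h>
    #include <stdio.h>

    struct net_device {
        const char *name;
        bool lock_held;                      /* models ni->dev_lock */
        void (*ndo_poll_controller)(struct net_device *);
    };

    /* Models netpoll_poll_dev(): trylock, optional poll controller,
     * then NAPI processing (elided). */
    static void netpoll_poll_dev_model(struct net_device *dev)
    {
        if (dev->lock_held)                  /* down_trylock() failed */
            return;
        if (dev->ndo_poll_controller)        /* now optional */
            dev->ndo_poll_controller(dev);
        printf("poll napi on %s\n", dev->name);
    }

    /* A stacked device just forwards to the helper per lower device. */
    static void master_poll(struct net_device **slaves, int n)
    {
        for (int i = 0; i < n; i++)
            netpoll_poll_dev_model(slaves[i]);
    }

    int main(void)
    {
        struct net_device a = { .name = "eth0" }, b = { .name = "eth1" };
        struct net_device *slaves[] = { &a, &b };
        master_poll(slaves, 2);
        return 0;
    }
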
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		    const struct iphdr *tnl_params, u8 protocol)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
+	unsigned int inner_nhdr_len = 0;
 	const struct iphdr *inner_iph;
 	struct flowi4 fl4;
 	u8 tos, ttl;
@@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	__be32 dst;
 	bool connected;
 
+	/* ensure we can access the inner net header, for several users below */
+	if (skb->protocol == htons(ETH_P_IP))
+		inner_nhdr_len = sizeof(struct iphdr);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		inner_nhdr_len = sizeof(struct ipv6hdr);
+	if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+		goto tx_error;
+
 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 	connected = (tunnel->parms.iph.daddr != 0);
 
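Note: this ip_tunnel.c hunk, and the ip6_tunnel.c one further below, enforce the same rule — a header in skb data may only be dereferenced after pskb_may_pull() has guaranteed that many linear bytes. Userspace analogue of the guard (invented names):

    #include <stddef.h>
    #include <stdio.h>

    struct iphdr_model { unsigned char ver_ihl, tos; unsigned short tot_len; };

    /* Models pskb_may_pull(): only hand out a header pointer after
     * checking the buffer holds that many contiguous bytes. */
    static const struct iphdr_model *pull_iphdr(const void *buf, size_t len)
    {
        if (len < sizeof(struct iphdr_model))
            return NULL;               /* the driver's goto tx_error */
        return buf;
    }

    int main(void)
    {
        unsigned char pkt[64] = { 0x45 };
        const struct iphdr_model *iph = pull_iphdr(pkt, sizeof(pkt));

        printf("%s\n", iph ? "header ok" : "runt packet");
        return 0;
    }
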
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4201,7 +4201,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
 				p++;
 				continue;
 			}
-			state->offset++;
 			return ifa;
 		}
 
@@ -4225,13 +4224,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
 		return ifa;
 	}
 
+	state->offset = 0;
 	while (++state->bucket < IN6_ADDR_HSIZE) {
-		state->offset = 0;
 		hlist_for_each_entry_rcu(ifa,
 				     &inet6_addr_lst[state->bucket], addr_lst) {
 			if (!net_eq(dev_net(ifa->idev->dev), net))
 				continue;
-			state->offset++;
 			return ifa;
 		}
 	}
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1234,7 +1234,7 @@ static inline int
 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	const struct iphdr *iph = ip_hdr(skb);
+	const struct iphdr *iph;
 	int encap_limit = -1;
 	struct flowi6 fl6;
 	__u8 dsfield;
@@ -1242,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	u8 tproto;
 	int err;
 
+	/* ensure we can access the full inner ip header */
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		return -1;
+
+	iph = ip_hdr(skb);
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
 	tproto = READ_ONCE(t->parms.proto);
@@ -1306,7 +1311,7 @@ static inline int
 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ipv6hdr *ipv6h;
 	int encap_limit = -1;
 	__u16 offset;
 	struct flowi6 fl6;
@@ -1315,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	u8 tproto;
 	int err;
 
+	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+		return -1;
+
+	ipv6h = ipv6_hdr(skb);
 	tproto = READ_ONCE(t->parms.proto);
 	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
 	    ip6_tnl_addr_conflict(t, ipv6h))
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -364,11 +364,14 @@ EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
+	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
 	struct rt6_info *rt = (struct rt6_info *)dst;
 	struct fib6_info *from;
 	struct inet6_dev *idev;
 
-	dst_destroy_metrics_generic(dst);
+	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+		kfree(p);
+
 	rt6_uncached_list_del(rt);
 
 	idev = rt->rt6i_idev;
@@ -976,6 +979,10 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
 	rt->rt6i_flags &= ~RTF_EXPIRES;
 	rcu_assign_pointer(rt->from, from);
 	dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
+	if (from->fib6_metrics != &dst_default_metrics) {
+		rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+		refcount_inc(&from->fib6_metrics->refcnt);
+	}
 }
 
 /* Caller must already hold reference to @ort */
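Note: the route.c fix above is the standard shared-refcount discipline that closed this metrics leak — every refcount_inc() taken when rt6_set_from() shares the metrics block is paired with a decrement in ip6_dst_destroy(), and only the last holder frees. Standalone model (invented names):

    #include <stdio.h>
    #include <stdlib.h>

    struct metrics { int refcnt; int vals[4]; };

    static struct metrics *metrics_share(struct metrics *m)
    {
        m->refcnt++;        /* the rt6_set_from() side */
        return m;
    }

    static void metrics_put(struct metrics *m)
    {
        if (--m->refcnt == 0) { /* the ip6_dst_destroy() side */
            free(m);
            puts("metrics freed once, by the last holder");
        }
    }

    int main(void)
    {
        struct metrics *m = calloc(1, sizeof(*m));
        m->refcnt = 1;
        struct metrics *shared = metrics_share(m);
        metrics_put(m);
        metrics_put(shared);
        return 0;
    }
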
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1533,10 +1533,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
 	unsigned int flags;
 
 	if (event == NETDEV_REGISTER) {
-		/* For now just support Ethernet, IPGRE, SIT and IPIP devices */
+
+		/* For now just support Ethernet, IPGRE, IP6GRE, SIT and
+		 * IPIP devices
+		 */
 		if (dev->type == ARPHRD_ETHER ||
 		    dev->type == ARPHRD_LOOPBACK ||
 		    dev->type == ARPHRD_IPGRE ||
+		    dev->type == ARPHRD_IP6GRE ||
 		    dev->type == ARPHRD_SIT ||
 		    dev->type == ARPHRD_TUNNEL) {
 			mdev = mpls_add_dev(dev);
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
 {
 	u32 addr_len;
 
-	if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+	if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+	    info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
 		addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
 		if (addr_len != sizeof(struct in_addr) &&
 		    addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
 		}
 		create_info = (struct hci_create_pipe_resp *)skb->data;
 
+		if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+			status = NFC_HCI_ANY_E_NOK;
+			goto exit;
+		}
+
 		/* Save the new created pipe and bind with local gate,
 		 * the description for skb->data[3] is destination gate id
 		 * but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
 		}
 		delete_info = (struct hci_delete_pipe_noti *)skb->data;
 
+		if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+			status = NFC_HCI_ANY_E_NOK;
+			goto exit;
+		}
+
 		hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
 		hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
 		break;
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
 int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
 
 /* ib_stats.c */
-DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
 #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
 #define rds_ib_stats_add(member, count) \
 		rds_stats_add_which(rds_ib_stats, member, count)
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
 bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
 	struct dst_entry *dst = sctp_transport_dst_check(t);
+	struct sock *sk = t->asoc->base.sk;
 	bool change = true;
 
 	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
@@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 	pmtu = SCTP_TRUNC4(pmtu);
 
 	if (dst) {
-		dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
+		struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
+		union sctp_addr addr;
+
+		pf->af->from_sk(&addr, sk);
+		pf->to_sk_daddr(&t->ipaddr, sk);
+		dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+		pf->to_sk_daddr(&addr, sk);
+
 		dst = sctp_transport_dst_check(t);
 	}
 
 	if (!dst) {
-		t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
+		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
 		dst = t->dst;
 	}
 
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work)
 		smc->sk.sk_err = -rc;
 
 out:
-	smc->sk.sk_state_change(&smc->sk);
+	if (smc->sk.sk_err)
+		smc->sk.sk_state_change(&smc->sk);
+	else
+		smc->sk.sk_write_space(&smc->sk);
 	kfree(smc->connect_info);
 	smc->connect_info = NULL;
 	release_sock(&smc->sk);
@@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
 }
 
 /* listen worker: finish RDMA setup */
-static void smc_listen_rdma_finish(struct smc_sock *new_smc,
-				   struct smc_clc_msg_accept_confirm *cclc,
-				   int local_contact)
+static int smc_listen_rdma_finish(struct smc_sock *new_smc,
+				  struct smc_clc_msg_accept_confirm *cclc,
+				  int local_contact)
 {
 	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
 	int reason_code = 0;
@@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc,
 		if (reason_code)
 			goto decline;
 	}
-	return;
+	return 0;
 
 decline:
 	mutex_unlock(&smc_create_lgr_pending);
 	smc_listen_decline(new_smc, reason_code, local_contact);
+	return reason_code;
 }
 
 /* setup for RDMA connection of server */
@@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work)
 	}
 
 	/* finish worker */
-	if (!ism_supported)
-		smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+	if (!ism_supported) {
+		if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
+			return;
+	}
 	smc_conn_save_peer_info(new_smc, &cclc);
 	mutex_unlock(&smc_create_lgr_pending);
 	smc_listen_out_connected(new_smc);
@@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 		return EPOLLNVAL;
 
 	smc = smc_sk(sock->sk);
-	if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
+	if (smc->use_fallback) {
 		/* delegate to CLC child sock */
 		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
 		sk->sk_err = smc->clcsock->sk->sk_err;
@@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 			if (sk->sk_state == SMC_APPCLOSEWAIT1)
 				mask |= EPOLLIN;
+			if (smc->conn.urg_state == SMC_URG_VALID)
+				mask |= EPOLLPRI;
 		}
-		if (smc->conn.urg_state == SMC_URG_VALID)
-			mask |= EPOLLPRI;
 	}
 
 	return mask;
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
 	vec[i++].iov_len = sizeof(trl);
 	/* due to the few bytes needed for clc-handshake this cannot block */
 	len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
-	if (len < sizeof(pclc)) {
-		if (len >= 0) {
-			reason_code = -ENETUNREACH;
-			smc->sk.sk_err = -reason_code;
-		} else {
-			smc->sk.sk_err = smc->clcsock->sk->sk_err;
-			reason_code = -smc->sk.sk_err;
-		}
+	if (len < 0) {
+		smc->sk.sk_err = smc->clcsock->sk->sk_err;
+		reason_code = -smc->sk.sk_err;
+	} else if (len < (int)sizeof(pclc)) {
+		reason_code = -ENETUNREACH;
+		smc->sk.sk_err = -reason_code;
 	}
 
 	return reason_code;
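Note: the smc_clc.c rewrite above is not just stylistic. In the old `len < sizeof(pclc)` the signed len is promoted to the unsigned type of sizeof, so a negative error return compares as a huge positive value. Testing `len < 0` first, then comparing against `(int)sizeof(...)`, keeps the arithmetic signed. Demonstration:

    #include <stdio.h>

    int main(void)
    {
        int len = -1;              /* e.g. an error from a send call */
        unsigned long size = 100;  /* stand-in for sizeof(pclc) */

        /* -1 is converted to unsigned here, so the test is false */
        if (len < size)
            puts("unsigned compare: error branch taken");
        else
            puts("unsigned compare: error branch SKIPPED");

        if (len < 0)
            puts("check len < 0 first: error handled");
        else if (len < (int)size)
            puts("short send handled");
        return 0;
    }
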
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc)
 	struct smc_cdc_conn_state_flags *txflags =
 		&smc->conn.local_tx_ctrl.conn_state_flags;
 
-	sk->sk_err = ECONNABORTED;
-	if (smc->clcsock && smc->clcsock->sk) {
-		smc->clcsock->sk->sk_err = ECONNABORTED;
-		smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+	if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
+		sk->sk_err = ECONNABORTED;
+		if (smc->clcsock && smc->clcsock->sk) {
+			smc->clcsock->sk->sk_err = ECONNABORTED;
+			smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+		}
 	}
 	switch (sk->sk_state) {
-	case SMC_INIT:
-		sk->sk_state = SMC_PEERABORTWAIT;
-		break;
 	case SMC_ACTIVE:
 		sk->sk_state = SMC_PEERABORTWAIT;
 		release_sock(sk);
@@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
 	case SMC_PEERFINCLOSEWAIT:
 		sock_put(sk); /* passive closing */
 		break;
+	case SMC_INIT:
 	case SMC_PEERABORTWAIT:
 	case SMC_CLOSED:
 		break;
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = {
 };
 
 /* SMC_PNETID family definition */
-static struct genl_family smc_pnet_nl_family = {
+static struct genl_family smc_pnet_nl_family __ro_after_init = {
 	.hdrsize = 0,
 	.name = SMCR_GENL_FAMILY_NAME,
 	.version = SMCR_GENL_FAMILY_VERSION,
--- a/tools/include/tools/libc_compat.h
+++ b/tools/include/tools/libc_compat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause)
 /* Copyright (C) 2018 Netronome Systems, Inc. */
 
 #ifndef __TOOLS_LIBC_COMPAT_H
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -580,7 +580,11 @@ static void test_sockmap(int tasks, void *data)
 	/* Test update without programs */
 	for (i = 0; i < 6; i++) {
 		err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
-		if (err) {
+		if (i < 2 && !err) {
+			printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
+			       i, sfd[i]);
+			goto out_sockmap;
+		} else if (i >= 2 && err) {
 			printf("Failed noprog update sockmap '%i:%i'\n",
 			       i, sfd[i]);
 			goto out_sockmap;
@@ -741,7 +745,7 @@ static void test_sockmap(int tasks, void *data)
 	}
 
 	/* Test map update elem afterwards fd lives in fd and map_fd */
-	for (i = 0; i < 6; i++) {
+	for (i = 2; i < 6; i++) {
 		err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
 		if (err) {
 			printf("Failed map_fd_rx update sockmap %i '%i:%i'\n",
@@ -845,7 +849,7 @@ static void test_sockmap(int tasks, void *data)
 	}
 
 	/* Delete the elems without programs */
-	for (i = 0; i < 6; i++) {
+	for (i = 2; i < 6; i++) {
 		err = bpf_map_delete_elem(fd, &i);
 		if (err) {
 			printf("Failed delete sockmap %i '%i:%i'\n",
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -178,8 +178,8 @@ setup() {
 
 cleanup() {
 	[ ${cleanup_done} -eq 1 ] && return
-	ip netns del ${NS_A} 2 > /dev/null
-	ip netns del ${NS_B} 2 > /dev/null
+	ip netns del ${NS_A} 2> /dev/null
+	ip netns del ${NS_B} 2> /dev/null
 	cleanup_done=1
 }
 
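Note on the pmtu.sh hunk above: in POSIX shells the stderr redirection must be written as a single token. With the old `ip netns del ${NS_A} 2 > /dev/null`, the `2` is passed to `ip` as an extra argument and only stdout is redirected, so cleanup errors still leak to the terminal; `2> /dev/null` (no space before `>`) redirects stderr as intended.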