Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Check iwlwifi 9000 reorder buffer out-of-space condition properly, from Sara Sharon.

 2) Fix RCU splat in qualcomm rmnet driver, from Subash Abhinov Kasiviswanathan.

 3) Fix session and tunnel release races in l2tp, from Guillaume Nault and Sabrina Dubroca.

 4) Fix endian bug in sctp_diag_dump(), from Dan Carpenter.

 5) Several mlx5 driver fixes from the Mellanox folks (max flow counters cap check, invalid memory access in IPoIB support, etc.)

 6) tun_get_user() should bail if skb->len is zero, from Alexander Potapenko.

 7) Fix RCU lookups in inetpeer, from Eric Dumazet.

 8) Fix locking in packet_do_bind().

 9) Handle cb->start() error properly in netlink dump code, from Jason A. Donenfeld.

10) Handle multicast properly in UDP socket early demux code. From Paolo Abeni.

11) Several erspan bug fixes in ip_gre, from Xin Long.

12) Fix use-after-free in socket filter code, in order to handle the fact that listener lock is no longer taken during the three-way TCP handshake. From Eric Dumazet.

13) Fix infoleak in RTM_GETSTATS, from Nikolay Aleksandrov.

14) Fix tail call generation in x86-64 BPF JIT, from Alexei Starovoitov.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (77 commits)
  net: 8021q: skip packets if the vlan is down
  bpf: fix bpf_tail_call() x64 JIT
  net: stmmac: dwmac-rk: Add RK3128 GMAC support
  rndis_host: support Novatel Verizon USB730L
  net: rtnetlink: fix info leak in RTM_GETSTATS call
  socket, bpf: fix possible use after free
  mlxsw: spectrum_router: Track RIF of IPIP next hops
  mlxsw: spectrum_router: Move VRF refcounting
  net: hns3: Fix an error handling path in 'hclge_rss_init_hw()'
  net: mvpp2: Fix clock resource by adding an optional bus clock
  r8152: add Linksys USB3GIGV1 id
  l2tp: fix l2tp_eth module loading
  ip_gre: erspan device should keep dst
  ip_gre: set tunnel hlen properly in erspan_tunnel_init
  ip_gre: check packet length and mtu correctly in erspan_xmit
  ip_gre: get key from session_id correctly in erspan_rcv
  tipc: use only positive error codes in messages
  ppp: fix __percpu annotation
  udp: perform source validation for mcast early demux
  IPv4: early demux can return an error code
  ...
@@ -21,8 +21,9 @@ Required properties:
	- main controller clock (for both armada-375-pp2 and armada-7k-pp2)
	- GOP clock (for both armada-375-pp2 and armada-7k-pp2)
	- MG clock (only for armada-7k-pp2)
- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and
  "mg_clk" (the latter only for armada-7k-pp2).
	- AXI clock (only for armada-7k-pp2)
- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk"
  and "axi_clk" (the 2 latter only for armada-7k-pp2).

The ethernet ports are represented by subnodes. At least one port is
required.

@@ -78,8 +79,9 @@ Example for marvell,armada-7k-pp2:
cpm_ethernet: ethernet@0 {
	compatible = "marvell,armada-7k-pp22";
	reg = <0x0 0x100000>, <0x129000 0xb000>;
	clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
	clock-names = "pp_clk", "gop_clk", "gp_clk";
	clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>,
		 <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>;
	clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk";

	eth0: eth0 {
		interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>,
@@ -4,6 +4,7 @@ The device node has following properties.

Required properties:
 - compatible: should be "rockchip,<name>-gamc"
   "rockchip,rk3128-gmac": found on RK312x SoCs
   "rockchip,rk3228-gmac": found on RK322x SoCs
   "rockchip,rk3288-gmac": found on RK3288 SoCs
   "rockchip,rk3328-gmac": found on RK3328 SoCs
@@ -284,9 +284,9 @@ static void emit_bpf_tail_call(u8 **pprog)
	/* if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
	EMIT2(0x89, 0xD2); /* mov edx, edx */
	EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
	EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
#define OFFSET1 43 /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1); /* jbe out */
	label1 = cnt;
@@ -1100,6 +1100,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
	};
	int i, err;

	/* DSA and CPU ports have to be members of multiple vlans */
	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
		return 0;

	if (!vid_begin)
		return -EOPNOTSUPP;

@@ -3947,7 +3951,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
	if (chip->irq > 0) {
		if (chip->info->g2_irqs > 0)
			mv88e6xxx_g2_irq_free(chip);
		mutex_lock(&chip->reg_lock);
		mv88e6xxx_g1_irq_free(chip);
		mutex_unlock(&chip->reg_lock);
	}
}
@@ -51,6 +51,10 @@

#define AQ_CFG_SKB_FRAGS_MAX 32U

/* Number of descriptors available in one ring to resume this ring queue
 */
#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)

#define AQ_CFG_NAPI_WEIGHT 64U

#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
@@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
	return 0;
}

static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);

	if (err)
		return err;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
		pr_info("%s: link change old %d new %d\n",
			AQ_CFG_DRV_NAME, self->link_status.mbps,
			self->aq_hw->aq_link_status.mbps);

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->header.flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->header.flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
	}
	return 0;
}

static void aq_nic_service_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;

@@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param)
	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
		goto err_exit;

	err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
	if (err < 0)
	err = aq_nic_update_link_status(self);
	if (err)
		goto err_exit;

	self->link_status = self->aq_hw->aq_link_status;

	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
		self->aq_nic_cfg.is_interrupt_moderation);

	if (self->link_status.mbps) {
		aq_utils_obj_set(&self->header.flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->header.flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
	} else {
		netif_carrier_off(self->ndev);
		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
	}

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	for (i = AQ_DIMOF(self->aq_vec); i--;) {
@@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
	SET_NETDEV_DEV(ndev, dev);

	ndev->if_port = port;
	ndev->min_mtu = ETH_MIN_MTU;
	self->ndev = ndev;

	self->aq_pci_func = aq_pci_func;

@@ -241,7 +256,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;
	unsigned int i = 0U;

	if (!self->ndev) {
		err = -EINVAL;

@@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)

	netif_carrier_off(self->ndev);

	for (i = AQ_CFG_VECS_MAX; i--;)
		aq_nic_ndev_queue_stop(self, i);
	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err < 0)

@@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;

	return 0;
}

@@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
		err = -EINVAL;
		goto err_exit;
	}
	if (netif_running(ndev)) {
		unsigned int i;

		for (i = AQ_CFG_VECS_MAX; i--;)
			netif_stop_subqueue(ndev, i);
	}
	if (netif_running(ndev))
		netif_tx_disable(ndev);

	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
	     self->aq_vecs++) {

@@ -383,16 +393,6 @@ int aq_nic_init(struct aq_nic_s *self)
	return err;
}

void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
{
	netif_start_subqueue(self->ndev, idx);
}

void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
{
	netif_stop_subqueue(self->ndev, idx);
}

int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;

@@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self)
		goto err_exit;
	}

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_start(self, i);

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

@@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self)
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}
@@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *first = NULL;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	if (unlikely(skb_is_gso(skb))) {

@@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;
		dx_buff->eop_index = 0xffffU;

		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;

@@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;

@@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);
		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
					   frag_len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = AQ_CFG_TX_FRAME_MAX;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			frag_len -= AQ_CFG_TX_FRAME_MAX;
			frag_pa += AQ_CFG_TX_FRAME_MAX;
			++ret;
		}

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];

		dx_buff->flags = 0U;
		dx_buff->len = frag_len;
		dx_buff->pa = frag_pa;
		dx_buff->is_mapped = 1U;
		++ret;
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;
@@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;
	bool is_nic_in_bad_state;

	frags = skb_shinfo(skb)->nr_frags + 1;

@@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
		goto err_exit;
	}

	is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
						AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
			      (aq_ring_avail_dx(ring) <
			       AQ_CFG_SKB_FRAGS_MAX);
	aq_ring_update_queue_state(ring);

	if (is_nic_in_bad_state) {
		aq_nic_ndev_queue_stop(self, ring->idx);
	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

@@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
				      ring,
				      frags);
	if (err >= 0) {
		if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
			aq_nic_ndev_queue_stop(self, ring->idx);

		++ring->stats.tx.packets;
		ring->stats.tx.bytes += skb->len;
	}

@@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	int err = 0;

	if (new_mtu > self->aq_hw_caps.mtu) {
		err = -EINVAL;
		goto err_exit;
	}
	self->aq_nic_cfg.mtu = new_mtu;

err_exit:
	return err;
	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)

@@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self)
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_stop(self, i);
	netif_tx_disable(self->ndev);

	del_timer_sync(&self->service_timer);
@@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
int aq_nic_init(struct aq_nic_s *self);
int aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
	return 0;
}

static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx)) {
		netif_wake_subqueue(ndev, ring->idx);
		ring->stats.tx.queue_restarts++;
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		netif_stop_subqueue(ndev, ring->idx);
}

void aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);

@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop))
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			else
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);
	}
}

static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
					       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
	}
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s {
		};
		union {
			struct {
				u32 len:16;
				u16 len;
				u32 is_ip_cso:1;
				u32 is_udp_cso:1;
				u32 is_tcp_cso:1;

@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s {
				u32 is_cleaned:1;
				u32 is_error:1;
				u32 rsvd3:6;
				u16 eop_index;
				u16 rsvd4;
			};
			u32 flags;
			u64 flags;
		};
	};

@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s {
	u64 errors;
	u64 packets;
	u64 bytes;
	u64 queue_restarts;
};

union aq_ring_stats_s {

@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
int aq_ring_init(struct aq_ring_s *self);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
void aq_ring_tx_clean(struct aq_ring_s *self);
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
		if (ring[AQ_VEC_TX_ID].sw_head !=
		    ring[AQ_VEC_TX_ID].hw_head) {
			aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);

			if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
			    AQ_CFG_SKB_FRAGS_MAX) {
				aq_nic_ndev_queue_start(self->aq_nic,
					ring[AQ_VEC_TX_ID].idx);
			}
			aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			was_tx_cleaned = true;
		}

@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self,
		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
		stats_tx->queue_restarts += tx->queue_restarts;
	}
}
@@ -16,7 +16,7 @@

#include "../aq_common.h"

#define HW_ATL_B0_MTU_JUMBO (16000U)
#define HW_ATL_B0_MTU_JUMBO 16352U
#define HW_ATL_B0_MTU 1514U

#define HW_ATL_B0_TX_RINGS 4U

@@ -351,8 +351,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
			break;

		default:
			link_status->mbps = 0U;
			break;
			return -EBUSY;
		}
	}
@@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;
		goto error;

	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
@@ -2652,7 +2652,8 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
		ret = -EINVAL;
		goto err;
	}

	roundup_size = roundup_pow_of_two(rss_size);
@@ -333,7 +333,7 @@
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc

@@ -676,6 +676,7 @@ enum mvpp2_tag_type {
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000

@@ -792,6 +793,7 @@ struct mvpp2 {
	struct clk *pp_clk;
	struct clk *gop_clk;
	struct clk *mg_clk;
	struct clk *axi_clk;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

@@ -2315,7 +2317,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Fragmented packet */
	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)

@@ -2334,8 +2336,12 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
				 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

@@ -2346,7 +2352,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Not fragmented packet */
	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)

@@ -2358,8 +2364,11 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);

@@ -4591,7 +4600,6 @@ static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
		val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
	}
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

@@ -7496,7 +7504,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv)
			    struct mvpp2 *priv, int index)
{
	struct device_node *phy_node;
	struct phy *comphy;

@@ -7670,7 +7678,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[id] = port;
	priv->port_list[index] = port;
	return 0;

err_free_port_pcpu:

@@ -7963,6 +7971,18 @@ static int mvpp2_probe(struct platform_device *pdev)
		err = clk_prepare_enable(priv->mg_clk);
		if (err < 0)
			goto err_gop_clk;

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_gop_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_gop_clk;
		}
	}

	/* Get system's tclk rate */

@@ -8005,16 +8025,19 @@ static int mvpp2_probe(struct platform_device *pdev)
	}

	/* Initialize ports */
	i = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv);
		err = mvpp2_port_probe(pdev, port_node, priv, i);
		if (err < 0)
			goto err_mg_clk;
		i++;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_mg_clk:
	clk_disable_unprepare(priv->axi_clk);
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:

@@ -8052,6 +8075,7 @@ static int mvpp2_remove(struct platform_device *pdev)
				  aggr_txq->descs_dma);
	}

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);
@@ -139,7 +139,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
	{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}

TRACE_EVENT(mlx5_fs_set_fte,
	    TP_PROTO(const struct fs_fte *fte, bool new_fte),
	    TP_PROTO(const struct fs_fte *fte, int new_fte),
	    TP_ARGS(fte, new_fte),
	    TP_STRUCT__entry(
		__field(const struct fs_fte *, fte)

@@ -149,7 +149,7 @@ TRACE_EVENT(mlx5_fs_set_fte,
		__field(u32, action)
		__field(u32, flow_tag)
		__field(u8, mask_enable)
		__field(bool, new_fte)
		__field(int, new_fte)
		__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
		__array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
		__array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc))
@@ -291,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
	priv->fs.vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_any_vid_rules(priv);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)

@@ -302,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
	priv->fs.vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_any_vid_rules(priv);
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,

@@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
	struct mlx5e_sw_stats temp, *s = &temp;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u64 tx_offload_none = 0;
	int i, j;

	memset(s, 0, sizeof(*s));

@@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
		s->rx_lro_bytes += rq_stats->lro_bytes;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_tx += rq_stats->xdp_tx;

@@ -229,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
			s->tx_queue_dropped += sq_stats->dropped;
			s->tx_xmit_more += sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			tx_offload_none += sq_stats->csum_none;
			s->tx_csum_none += sq_stats->csum_none;
			s->tx_csum_partial += sq_stats->csum_partial;
		}
	}

	/* Update calculated offload counters */
	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);

@@ -3333,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev,

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}
@@ -627,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,

	if (lro) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		rq->stats.csum_unnecessary++;
		return;
	}

@@ -644,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
			skb->csum_level = 1;
			skb->encapsulation = 1;
			rq->stats.csum_unnecessary_inner++;
			return;
		}
		rq->stats.csum_unnecessary++;
		return;
	}
csum_none:
@@ -68,6 +68,7 @@ struct mlx5e_sw_stats {
	u64 rx_xdp_drop;
	u64 rx_xdp_tx;
	u64 rx_xdp_tx_full;
	u64 tx_csum_none;
	u64 tx_csum_partial;
	u64 tx_csum_partial_inner;
	u64 tx_queue_stopped;

@@ -108,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },

@@ -339,6 +341,7 @@ struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
	u64 csum_complete;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 csum_none;
	u64 lro_packets;

@@ -363,6 +366,7 @@ static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },

@@ -392,6 +396,7 @@ struct mlx5e_sq_stats {
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 csum_partial;
	u64 csum_partial_inner;
	u64 nop;
	/* less likely accessed in data path */

@@ -408,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
@@ -1317,6 +1317,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
	return true;
}

static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct tcf_exts *exts)
{
	const struct tc_action *a;
	bool modify_ip_header;
	LIST_HEAD(actions);
	u8 htype, ip_proto;
	void *headers_v;
	u16 ethertype;
	int nkeys, i;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (!is_tcf_pedit(a))
			continue;

		nkeys = tcf_pedit_nkeys(a);
		for (i = 0; i < nkeys; i++) {
			htype = tcf_pedit_htype(a, i);
			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
				modify_ip_header = true;
				break;
			}
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}

static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct tcf_exts *exts,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow)
{
	u32 actions;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec, exts);

	return true;
}

static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)

@@ -1378,6 +1441,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
		return -EINVAL;
	}

	if (!actions_match_supported(priv, exts, parse_attr, flow))
		return -EOPNOTSUPP;

	return 0;
}

@@ -1564,7 +1630,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
		goto free_encap;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;

@@ -1573,7 +1639,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule

@@ -1590,7 +1656,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto out;
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;

@@ -1630,8 +1696,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;

@@ -1668,7 +1735,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
		goto free_encap;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);

@@ -1678,7 +1745,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule

@@ -1695,7 +1762,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto out;
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;

@@ -1736,8 +1803,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;

@@ -1791,6 +1859,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

@@ -1817,6 +1886,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;
	else
		err = -EAGAIN;

	return err;

@@ -1934,6 +2005,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,

		return -EINVAL;
	}

	if (!actions_match_supported(priv, exts, parse_attr, flow))
		return -EOPNOTSUPP;

	return err;
}
@@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats.csum_partial++;
		}
	} else
		sq->stats.csum_none++;
@@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
	return 0;
}

int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
int mlx5_fpga_caps(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};

	return mlx5_core_access_reg(dev, in, sizeof(in), caps,
	return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga,
				    MLX5_ST_SZ_BYTES(fpga_cap),
				    MLX5_REG_FPGA_CAP, 0, 0);
}

@@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters {
	u64 rx_total_drop;
};

int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
int mlx5_fpga_caps(struct mlx5_core_dev *dev);
int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,

@@ -139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
	if (err)
		goto out;

	err = mlx5_fpga_caps(fdev->mdev,
			     fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
	err = mlx5_fpga_caps(fdev->mdev);
	if (err)
		goto out;
@@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
	}

	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
						log_max_flow_counter,
						ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {

@@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

@@ -52,6 +52,7 @@ enum fs_flow_table_type {
	FS_FT_FDB = 0X4,
	FS_FT_SNIFFER_RX = 0X5,
	FS_FT_SNIFFER_TX = 0X6,
	FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
};

enum fs_flow_table_op_mod {

@@ -260,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
#define fs_for_each_dst(pos, fte) \
	fs_list_for_each_entry(pos, &(fte)->node.children)

#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
	(type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
	(type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
	(type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
	(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
	(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
	(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
	(BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
	)

#endif
|
@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
|
|||
{
|
||||
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
|
||||
const struct mlx5e_profile *profile = priv->profile;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
|
||||
mlx5e_detach_netdev(priv);
|
||||
profile->cleanup(priv);
|
||||
destroy_workqueue(priv->wq);
|
||||
free_netdev(netdev);
|
||||
|
||||
mlx5e_destroy_mdev_resources(priv->mdev);
|
||||
mlx5e_destroy_mdev_resources(mdev);
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
|
||||
|
|
|
@@ -2723,6 +2723,7 @@ static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
		break;
	}

@@ -2742,7 +2743,11 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
						     MLXSW_SP_L3_PROTO_IPV4)) {
		nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
		return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
		err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
		if (err)
			return err;
		mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
		return 0;
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;

@@ -4009,7 +4014,11 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
						     MLXSW_SP_L3_PROTO_IPV6)) {
		nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
		return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
		err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
		if (err)
			return err;
		mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
		return 0;
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;

@@ -5068,6 +5077,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	vr->rif_count++;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err)

@@ -5099,7 +5109,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,

	mlxsw_sp_rif_counters_alloc(rif);
	mlxsw_sp->router->rifs[rif_index] = rif;
	vr->rif_count++;

	return rif;

@@ -5110,6 +5119,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	vr->rif_count--;
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}

@@ -5124,7 +5134,6 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	vr->rif_count--;
	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	mlxsw_sp_rif_counters_free(rif);
	ops->deconfigure(rif);

@@ -5132,6 +5141,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
	/* Loopback RIFs are not associated with a FID. */
	mlxsw_sp_fid_put(fid);
	kfree(rif);
	vr->rif_count--;
	mlxsw_sp_vr_put(vr);
}
@@ -898,7 +898,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,

		curr_rxbuf->dma_addr =
			dma_map_single(adpt->netdev->dev.parent, skb->data,
				       curr_rxbuf->length, DMA_FROM_DEVICE);
				       adpt->rxbuf_size, DMA_FROM_DEVICE);

		ret = dma_mapping_error(adpt->netdev->dev.parent,
					curr_rxbuf->dma_addr);
		if (ret) {
@@ -51,10 +51,7 @@ struct rmnet_walk_data {

static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
	rx_handler_func_t *rx_handler;

	rx_handler = rcu_dereference(real_dev->rx_handler);
	return (rx_handler == rmnet_rx_handler);
	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}

/* Needs rtnl lock */
@@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info)
int rocker_tlv_put(struct rocker_desc_info *desc_info,
		   int attrtype, int attrlen, const void *data);

static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
				    int attrtype, u8 value)
static inline int
rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
	u8 tmp = value; /* work around GCC PR81715 */

	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
}

static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
				     int attrtype, u16 value)
static inline int
rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
	u16 tmp = value;

	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
}

static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
				      int attrtype, __be16 value)
static inline int
rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
	__be16 tmp = value;

	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
}

static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
				     int attrtype, u32 value)
static inline int
rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
	u32 tmp = value;

	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
}

static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
				      int attrtype, __be32 value)
static inline int
rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
	__be32 tmp = value;

	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
}

static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
				     int attrtype, u64 value)
static inline int
rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
	u64 tmp = value;

	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
}

static inline struct rocker_tlv *
@@ -511,6 +511,7 @@ static struct platform_driver dwc_eth_dwmac_driver = {
	.remove = dwc_eth_dwmac_remove,
	.driver = {
		.name = "dwc-eth-dwmac",
		.pm = &stmmac_pltfr_pm_ops,
		.of_match_table = dwc_eth_dwmac_match,
	},
};
@@ -83,6 +83,117 @@ struct rk_priv_data {
	(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
	 ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))

#define RK3128_GRF_MAC_CON0 0x0168
#define RK3128_GRF_MAC_CON1 0x016c

/* RK3128_GRF_MAC_CON0 */
#define RK3128_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)

/* RK3128_GRF_MAC_CON1 */
#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
		(GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
#define RK3128_GMAC_PHY_INTF_SEL_RMII \
		(GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)

static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
				int tx_delay, int rx_delay)
{
	struct device *dev = &bsp_priv->pdev->dev;

	if (IS_ERR(bsp_priv->grf)) {
		dev_err(dev, "Missing rockchip,grf property\n");
		return;
	}

	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
		     RK3128_GMAC_PHY_INTF_SEL_RGMII |
		     RK3128_GMAC_RMII_MODE_CLR);
	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
		     DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
		     RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
		     RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
}

static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
{
	struct device *dev = &bsp_priv->pdev->dev;

	if (IS_ERR(bsp_priv->grf)) {
		dev_err(dev, "Missing rockchip,grf property\n");
		return;
	}

	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
		     RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
}

static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
	struct device *dev = &bsp_priv->pdev->dev;

	if (IS_ERR(bsp_priv->grf)) {
		dev_err(dev, "Missing rockchip,grf property\n");
		return;
	}

	if (speed == 10)
		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
			     RK3128_GMAC_CLK_2_5M);
	else if (speed == 100)
		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
			     RK3128_GMAC_CLK_25M);
	else if (speed == 1000)
		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
			     RK3128_GMAC_CLK_125M);
	else
		dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
}

static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
	struct device *dev = &bsp_priv->pdev->dev;

	if (IS_ERR(bsp_priv->grf)) {
		dev_err(dev, "Missing rockchip,grf property\n");
		return;
	}

	if (speed == 10) {
		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
			     RK3128_GMAC_RMII_CLK_2_5M |
			     RK3128_GMAC_SPEED_10M);
	} else if (speed == 100) {
		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
			     RK3128_GMAC_RMII_CLK_25M |
			     RK3128_GMAC_SPEED_100M);
	} else {
		dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
	}
}

static const struct rk_gmac_ops rk3128_ops = {
	.set_to_rgmii = rk3128_set_to_rgmii,
	.set_to_rmii = rk3128_set_to_rmii,
	.set_rgmii_speed = rk3128_set_rgmii_speed,
	.set_rmii_speed = rk3128_set_rmii_speed,
};

#define RK3228_GRF_MAC_CON0 0x0900
#define RK3228_GRF_MAC_CON1 0x0904

@@ -1313,6 +1424,7 @@ static int rk_gmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);

static const struct of_device_id rk_gmac_dwmac_match[] = {
	{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
	{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
	{ .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
@@ -296,6 +296,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");

@@ -306,6 +307,12 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}
@@ -120,7 +120,7 @@ struct ppp {
	int n_channels; /* how many channels are attached 54 */
	spinlock_t rlock; /* lock for receive side 58 */
	spinlock_t wlock; /* lock for transmit side 5c */
	int *xmit_recursion __percpu; /* xmit recursion detect */
	int __percpu *xmit_recursion; /* xmit recursion detect */
	int mru; /* max receive unit 60 */
	unsigned int flags; /* control bits 64 */
	unsigned int xstate; /* transmit state bits 68 */
@@ -1496,11 +1496,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			switch (skb->data[0] & 0xf0) {
			case 0x40:
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 0x60:
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
@@ -54,11 +54,19 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
            desc->bInterfaceProtocol == 3);
}

+static int is_novatel_rndis(struct usb_interface_descriptor *desc)
+{
+   return (desc->bInterfaceClass == USB_CLASS_MISC &&
+           desc->bInterfaceSubClass == 4 &&
+           desc->bInterfaceProtocol == 1);
+}
+
#else

#define is_rndis(desc) 0
#define is_activesync(desc) 0
#define is_wireless_rndis(desc) 0
+#define is_novatel_rndis(desc) 0

#endif

@@ -150,7 +158,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
     */
    rndis = (is_rndis(&intf->cur_altsetting->desc) ||
             is_activesync(&intf->cur_altsetting->desc) ||
-            is_wireless_rndis(&intf->cur_altsetting->desc));
+            is_wireless_rndis(&intf->cur_altsetting->desc) ||
+            is_novatel_rndis(&intf->cur_altsetting->desc));

    memset(info, 0, sizeof(*info));
    info->control = intf;

@@ -547,6 +556,7 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define LINKSYS_VENDOR_ID 0x13b1
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
#define MICROSOFT_VENDOR_ID 0x045e

@@ -737,6 +747,15 @@ static const struct usb_device_id products[] = {
    .driver_info = 0,
},

+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+   USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+           USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+   .driver_info = 0,
+},
+#endif
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
    USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
@@ -613,6 +613,7 @@ enum rtl8152_flags {
#define VENDOR_ID_MICROSOFT 0x045e
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955

#define MCU_TYPE_PLA 0x0100

@@ -5316,6 +5317,7 @@ static const struct usb_device_id rtl8152_table[] = {
    {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
    {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
    {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+   {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
    {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
    {}
};
@@ -632,6 +632,10 @@ static const struct usb_device_id products [] = {
    /* RNDIS for tethering */
    USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
    .driver_info = (unsigned long) &rndis_info,
+}, {
+   /* Novatel Verizon USB730L */
+   USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
+   .driver_info = (unsigned long) &rndis_info,
},
    { },        // END
};
@@ -3396,9 +3396,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

-#ifdef CONFIG_PM
-
-static int ath10k_pci_pm_suspend(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
{
    struct ath10k *ar = dev_get_drvdata(dev);
    int ret;

@@ -3414,7 +3412,7 @@ static int ath10k_pci_pm_suspend(struct device *dev)
    return ret;
}

-static int ath10k_pci_pm_resume(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
{
    struct ath10k *ar = dev_get_drvdata(dev);
    int ret;

@@ -3433,7 +3431,6 @@ static int ath10k_pci_pm_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
                         ath10k_pci_pm_suspend,
                         ath10k_pci_pm_resume);
-#endif

static struct pci_driver ath10k_pci_driver = {
    .name = "ath10k_pci",
@@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,

    eth_broadcast_addr(params_le->bssid);
    params_le->bss_type = DOT11_BSSTYPE_ANY;
-   params_le->scan_type = 0;
+   params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
    params_le->channel_num = 0;
    params_le->nprobes = cpu_to_le32(-1);
    params_le->active_time = cpu_to_le32(-1);

@@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
    params_le->home_time = cpu_to_le32(-1);
    memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));

-   /* if request is null exit so it will be all channel broadcast scan */
-   if (!request)
-       return;
-
    n_ssids = request->n_ssids;
    n_channels = request->n_channels;

    /* Copy channel array if applicable */
    brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
              n_channels);

@@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
            ptr += sizeof(ssid_le);
        }
    } else {
-       brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
-       if ((request->ssids) && request->ssids->ssid_len) {
-           brcmf_dbg(SCAN, "SSID %s len=%d\n",
-                     params_le->ssid_le.SSID,
-                     request->ssids->ssid_len);
-           params_le->ssid_le.SSID_len =
-               cpu_to_le32(request->ssids->ssid_len);
-           memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
-               request->ssids->ssid_len);
-       }
+       brcmf_dbg(SCAN, "Performing passive scan\n");
+       params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
    }
    /* Adding mask to channel numbers */
    params_le->channel_num =

@@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
    struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
    s32 status;
    struct brcmf_escan_result_le *escan_result_le;
+   u32 escan_buflen;
    struct brcmf_bss_info_le *bss_info_le;
    struct brcmf_bss_info_le *bss = NULL;
    u32 bi_length;

@@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,

    if (status == BRCMF_E_STATUS_PARTIAL) {
        brcmf_dbg(SCAN, "ESCAN Partial result\n");
+       if (e->datalen < sizeof(*escan_result_le)) {
+           brcmf_err("invalid event data length\n");
+           goto exit;
+       }
        escan_result_le = (struct brcmf_escan_result_le *) data;
        if (!escan_result_le) {
            brcmf_err("Invalid escan result (NULL pointer)\n");
            goto exit;
        }
+       escan_buflen = le32_to_cpu(escan_result_le->buflen);
+       if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
+           escan_buflen > e->datalen ||
+           escan_buflen < sizeof(*escan_result_le)) {
+           brcmf_err("Invalid escan buffer length: %d\n",
+                     escan_buflen);
+           goto exit;
+       }
        if (le16_to_cpu(escan_result_le->bss_count) != 1) {
            brcmf_err("Invalid bss_count %d: ignoring\n",
                      escan_result_le->bss_count);

@@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
        }

        bi_length = le32_to_cpu(bss_info_le->length);
-       if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
-                   WL_ESCAN_RESULTS_FIXED_SIZE)) {
-           brcmf_err("Invalid bss_info length %d: ignoring\n",
+       if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
+           brcmf_err("Ignoring invalid bss_info length: %d\n",
                      bi_length);
            goto exit;
        }
@@ -45,6 +45,11 @@
#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16

+/* scan type definitions */
+#define BRCMF_SCANTYPE_DEFAULT 0xFF
+#define BRCMF_SCANTYPE_ACTIVE 0
+#define BRCMF_SCANTYPE_PASSIVE 1
+
#define BRCMF_WSEC_MAX_PSK_LEN 32
#define BRCMF_WSEC_PASSPHRASE BIT(0)
@@ -2167,7 +2167,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
     * 1. We are not using a unified image
     * 2. We are using a unified image but had an error while exiting D3
     */
-   set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+   set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
    set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
    /*
     * When switching images we return 1, which causes mac80211
@@ -1546,6 +1546,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
    struct iwl_mvm_mc_iter_data *data = _data;
    struct iwl_mvm *mvm = data->mvm;
    struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+   struct iwl_host_cmd hcmd = {
+       .id = MCAST_FILTER_CMD,
+       .flags = CMD_ASYNC,
+       .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+   };
    int ret, len;

    /* if we don't have free ports, mcast frames will be dropped */

@@ -1560,7 +1565,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
    memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
    len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);

-   ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+   hcmd.len[0] = len;
+   hcmd.data[0] = cmd;
+
+   ret = iwl_mvm_send_cmd(mvm, &hcmd);
    if (ret)
        IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}

@@ -1635,6 +1643,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
    if (!cmd)
        goto out;

+   if (changed_flags & FIF_ALLMULTI)
+       cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+   if (cmd->pass_all)
+       cmd->count = 0;
+
    iwl_mvm_recalc_multicast(mvm);
out:
    mutex_unlock(&mvm->mutex);

@@ -2563,7 +2577,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
             * queues, so we should never get a second deferred
             * frame for the RA/TID.
             */
-           iwl_mvm_start_mac_queues(mvm, info->hw_queue);
+           iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
            ieee80211_free_txskb(mvm->hw, skb);
        }
    }

@@ -3975,6 +3989,43 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
    return ret;
}

+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+   if (drop) {
+       if (iwl_mvm_has_new_tx_api(mvm))
+           /* TODO new tx api */
+           WARN_ONCE(1,
+                     "Need to implement flush TX queue\n");
+       else
+           iwl_mvm_flush_tx_path(mvm,
+               iwl_mvm_flushable_queues(mvm) & queues,
+               0);
+   } else {
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+           struct ieee80211_sta *sta;
+           int i;
+
+           mutex_lock(&mvm->mutex);
+
+           for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+               sta = rcu_dereference_protected(
+                       mvm->fw_id_to_mac_id[i],
+                       lockdep_is_held(&mvm->mutex));
+               if (IS_ERR_OR_NULL(sta))
+                   continue;
+
+               iwl_mvm_wait_sta_queues_empty(mvm,
+                       iwl_mvm_sta_from_mac80211(sta));
+           }
+
+           mutex_unlock(&mvm->mutex);
+       } else {
+           iwl_trans_wait_tx_queues_empty(mvm->trans,
+                                          queues);
+       }
+   }
+}
+
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif, u32 queues, bool drop)
{

@@ -3985,7 +4036,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
    int i;
    u32 msk = 0;

-   if (!vif || vif->type != NL80211_IFTYPE_STATION)
+   if (!vif) {
+       iwl_mvm_flush_no_vif(mvm, queues, drop);
+       return;
+   }
+
+   if (vif->type != NL80211_IFTYPE_STATION)
        return;

    /* Make sure we're done with the deferred traffic before flushing */
@@ -661,7 +661,8 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
        (lq_sta->tx_agg_tid_en & BIT(tid)) &&
        (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
        IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
-       rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta);
+       if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
+           tid_data->state = IWL_AGG_QUEUED;
    }
}
@@ -672,11 +672,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
     * If there was a significant jump in the nssn - adjust.
     * If the SN is smaller than the NSSN it might need to first go into
     * the reorder buffer, in which case we just release up to it and the
-    * rest of the function will take of storing it and releasing up to the
-    * nssn
+    * rest of the function will take care of storing it and releasing up to
+    * the nssn
     */
    if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
-               buffer->buf_size)) {
+               buffer->buf_size) ||
+       !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
        u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;

        iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
@@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
    struct iwl_host_cmd cmd = {
        .id = SCAN_OFFLOAD_ABORT_CMD,
    };
-   u32 status;
+   u32 status = CAN_ABORT_STATUS;

    ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
    if (ret)
@@ -1285,7 +1285,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
{
    struct iwl_mvm_add_sta_cmd cmd;
    int ret;
-   u32 status;
+   u32 status = ADD_STA_SUCCESS;

    lockdep_assert_held(&mvm->mutex);

@@ -2385,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
    if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
        return -EINVAL;

-   if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
-       IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
+   if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
+       mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+       IWL_ERR(mvm,
+           "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
            mvmsta->tid_data[tid].state);
        return -ENXIO;
    }
@@ -281,6 +281,7 @@ struct iwl_mvm_vif;
 * These states relate to a specific RA / TID.
 *
 * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_QUEUED: aggregation start work has been queued
 * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
 * @IWL_AGG_ON: aggregation session is up
 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the

@@ -290,6 +291,7 @@ struct iwl_mvm_vif;
 */
enum iwl_mvm_agg_state {
    IWL_AGG_OFF = 0,
+   IWL_AGG_QUEUED,
    IWL_AGG_STARTING,
    IWL_AGG_ON,
    IWL_EMPTYING_HW_QUEUE_ADDBA,
@@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)

    lockdep_assert_held(&mvm->mutex);

+   status = 0;
    ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
                                                   CTDP_CONFIG_CMD),
                                      sizeof(cmd), &cmd, &status);
@@ -564,8 +564,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
    case NL80211_IFTYPE_AP:
    case NL80211_IFTYPE_ADHOC:
        /*
-        * Handle legacy hostapd as well, where station will be added
-        * only just before sending the association response.
+        * Non-bufferable frames use the broadcast station, thus they
+        * use the probe queue.
         * Also take care of the case where we send a deauth to a
         * station that we don't have, or similarly an association
         * response (with non-success status) for a station we can't

@@ -573,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
         * Also, disassociate frames might happen, particular with
         * reason 7 ("Class 3 frame received from nonassociated STA").
         */
-       if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
-           ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) ||
-           ieee80211_is_disassoc(fc))
+       if (ieee80211_is_mgmt(fc) &&
+           (!ieee80211_is_bufferable_mmpdu(fc) ||
+            ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
            return mvm->probe_queue;
        if (info->hw_queue == info->control.vif->cab_queue)
            return mvmvif->cab_queue;
@@ -115,6 +115,8 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)

    vif = qtnf_netdev_get_priv(wdev->netdev);

+   qtnf_scan_done(vif->mac, true);
+
    if (qtnf_cmd_send_del_intf(vif))
        pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
               vif->vifid);

@@ -335,6 +337,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
    struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
    int ret;

+   qtnf_scan_done(vif->mac, true);
+
    ret = qtnf_cmd_send_stop_ap(vif);
    if (ret) {
        pr_err("VIF%u.%u: failed to stop AP operation in FW\n",

@@ -570,8 +574,6 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
        !qtnf_sta_list_lookup(&vif->sta_list, params->mac))
        return 0;

-   qtnf_scan_done(vif->mac, true);
-
    ret = qtnf_cmd_send_del_sta(vif, params);
    if (ret)
        pr_err("VIF%u.%u: failed to delete STA %pM\n",

@@ -1134,8 +1136,9 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
        }

        vif->sta_state = QTNF_STA_DISCONNECTED;
-       qtnf_scan_done(mac, true);
    }

+   qtnf_scan_done(mac, true);
}

void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
@@ -34,6 +34,9 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
        .aborted = aborted,
    };

+   if (timer_pending(&mac->scan_timeout))
+       del_timer_sync(&mac->scan_timeout);
+
    mutex_lock(&mac->mac_lock);

    if (mac->scan_req) {
@@ -345,8 +345,6 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac,
        return -EINVAL;
    }

-   if (timer_pending(&mac->scan_timeout))
-       del_timer_sync(&mac->scan_timeout);
    qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED);

    return 0;
@@ -661,14 +661,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
    struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
    dma_addr_t txbd_paddr, skb_paddr;
    struct qtnf_tx_bd *txbd;
+   unsigned long flags;
    int len, i;
    u32 info;
    int ret = 0;

+   spin_lock_irqsave(&priv->tx0_lock, flags);
+
    if (!qtnf_tx_queue_ready(priv)) {
        if (skb->dev)
            netif_stop_queue(skb->dev);

+       spin_unlock_irqrestore(&priv->tx0_lock, flags);
        return NETDEV_TX_BUSY;
    }

@@ -717,8 +721,10 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
        dev_kfree_skb_any(skb);
    }

-   qtnf_pcie_data_tx_reclaim(priv);
    priv->tx_done_count++;
+   spin_unlock_irqrestore(&priv->tx0_lock, flags);
+
+   qtnf_pcie_data_tx_reclaim(priv);

    return NETDEV_TX_OK;
}

@@ -1247,6 +1253,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
    init_completion(&bus->request_firmware_complete);
    mutex_init(&bus->bus_lock);
+   spin_lock_init(&pcie_priv->tx0_lock);
    spin_lock_init(&pcie_priv->irq_lock);
    spin_lock_init(&pcie_priv->tx_reclaim_lock);
@@ -34,6 +34,8 @@ struct qtnf_pcie_bus_priv {

    /* lock for tx reclaim operations */
    spinlock_t tx_reclaim_lock;
+   /* lock for tx0 operations */
+   spinlock_t tx0_lock;
    u8 msi_enabled;
    int mps;
@@ -980,7 +980,6 @@ enum mlx5_cap_type {
    MLX5_CAP_RESERVED,
    MLX5_CAP_VECTOR_CALC,
    MLX5_CAP_QOS,
-   MLX5_CAP_FPGA,
    /* NUM OF CAP Types */
    MLX5_CAP_NUM
};

@@ -1110,10 +1109,10 @@ enum mlx5_mcam_feature_groups {
    MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_FPGA(mdev, cap) \
-   MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+   MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP64_FPGA(mdev, cap) \
-   MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+   MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)

enum {
    MLX5_CMD_STAT_OK = 0x0,
@@ -774,6 +774,7 @@ struct mlx5_core_dev {
        u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
        u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+       u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
    } caps;
    phys_addr_t iseg_base;
    struct mlx5_init_seg __iomem *iseg;
@@ -327,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
    u8 reserved_at_80[0x18];
    u8 log_max_destination[0x8];

-   u8 reserved_at_a0[0x18];
+   u8 log_max_flow_counter[0x8];
+   u8 reserved_at_a8[0x10];
    u8 log_max_flow[0x8];

    u8 reserved_at_c0[0x40];
@@ -773,7 +773,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
 */
static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
{
-   return nla_put(skb, attrtype, sizeof(u8), &value);
+   /* temporary variables to work around GCC PR81715 with asan-stack=1 */
+   u8 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(u8), &tmp);
}

/**

@@ -784,7 +787,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
 */
static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
{
-   return nla_put(skb, attrtype, sizeof(u16), &value);
+   u16 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(u16), &tmp);
}

/**

@@ -795,7 +800,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
 */
static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
{
-   return nla_put(skb, attrtype, sizeof(__be16), &value);
+   __be16 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(__be16), &tmp);
}

/**

@@ -806,7 +813,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
 */
static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
{
-   return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+   __be16 tmp = value;
+
+   return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}

/**

@@ -817,7 +826,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
 */
static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
{
-   return nla_put(skb, attrtype, sizeof(__le16), &value);
+   __le16 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(__le16), &tmp);
}

/**

@@ -828,7 +839,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
 */
static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
{
-   return nla_put(skb, attrtype, sizeof(u32), &value);
+   u32 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(u32), &tmp);
}

/**

@@ -839,7 +852,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
 */
static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
{
-   return nla_put(skb, attrtype, sizeof(__be32), &value);
+   __be32 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(__be32), &tmp);
}

/**

@@ -850,7 +865,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
 */
static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
{
-   return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+   __be32 tmp = value;
+
+   return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}

/**

@@ -861,7 +878,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
 */
static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
{
-   return nla_put(skb, attrtype, sizeof(__le32), &value);
+   __le32 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(__le32), &tmp);
}

/**

@@ -874,7 +893,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
                                    u64 value, int padattr)
{
-   return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
+   u64 tmp = value;
+
+   return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}

/**

@@ -887,7 +908,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
                               int padattr)
{
-   return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
+   __be64 tmp = value;
+
+   return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
}

/**

@@ -900,7 +923,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
                                int padattr)
{
-   return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
+   __be64 tmp = value;
+
+   return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
                        padattr);
}

@@ -914,7 +939,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
                               int padattr)
{
-   return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
+   __le64 tmp = value;
+
+   return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
}

/**

@@ -925,7 +952,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
 */
static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
{
-   return nla_put(skb, attrtype, sizeof(s8), &value);
+   s8 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(s8), &tmp);
}

/**

@@ -936,7 +965,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
 */
static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
{
-   return nla_put(skb, attrtype, sizeof(s16), &value);
+   s16 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(s16), &tmp);
}

/**

@@ -947,7 +978,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
 */
static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
{
-   return nla_put(skb, attrtype, sizeof(s32), &value);
+   s32 tmp = value;
+
+   return nla_put(skb, attrtype, sizeof(s32), &tmp);
}

/**

@@ -960,7 +993,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
                              int padattr)
{
-   return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
+   s64 tmp = value;
+
+   return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
}

/**

@@ -1010,7 +1045,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
                                  __be32 addr)
{
-   return nla_put_be32(skb, attrtype, addr);
+   __be32 tmp = addr;
+
+   return nla_put_be32(skb, attrtype, tmp);
}

/**
@@ -39,8 +39,8 @@

/* This is used to register protocols. */
struct net_protocol {
-   void (*early_demux)(struct sk_buff *skb);
-   void (*early_demux_handler)(struct sk_buff *skb);
+   int (*early_demux)(struct sk_buff *skb);
+   int (*early_demux_handler)(struct sk_buff *skb);
    int (*handler)(struct sk_buff *skb);
    void (*err_handler)(struct sk_buff *skb, u32 info);
    unsigned int no_policy:1,
@@ -175,7 +175,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
    fl4->fl4_gre_key = gre_key;
    return ip_route_output_key(net, fl4);
}
+
+int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+                          u8 tos, struct net_device *dev,
+                          struct in_device *in_dev, u32 *itag);
int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
                         u8 tos, struct net_device *devin);
int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
@@ -345,7 +345,7 @@ void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

-void tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
@@ -259,7 +259,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
    return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
}

-void udp_v4_early_demux(struct sk_buff *skb);
+int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
                 int (*saddr_cmp)(const struct sock *,
@@ -312,7 +312,7 @@ union bpf_attr {
 *     jump into another BPF program
 *     @ctx: context pointer passed to next program
 *     @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
- *     @index: index inside array that selects specific program to run
+ *     @index: 32-bit index inside array that selects specific program to run
 *     Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
@@ -1022,7 +1022,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
        struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog;
-       u64 index = BPF_R3;
+       u32 index = BPF_R3;

        if (unlikely(index >= array->map.max_entries))
            goto out;
@@ -21,6 +21,12 @@ bool vlan_do_receive(struct sk_buff **skbp)
    if (unlikely(!skb))
        return false;

+   if (unlikely(!(vlan_dev->flags & IFF_UP))) {
+       kfree_skb(skb);
+       *skbp = NULL;
+       return false;
+   }
+
    skb->dev = vlan_dev;
    if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
        /* Our lower layer thinks this is not local, let's make sure.
@@ -989,10 +989,14 @@ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
-   bool ret = __sk_filter_charge(sk, fp);
-   if (ret)
-       refcount_inc(&fp->refcnt);
-   return ret;
+   if (!refcount_inc_not_zero(&fp->refcnt))
+       return false;
+
+   if (!__sk_filter_charge(sk, fp)) {
+       sk_filter_release(fp);
+       return false;
+   }
+   return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
@@ -3854,6 +3854,9 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
        return -EMSGSIZE;

    ifsm = nlmsg_data(nlh);
+   ifsm->family = PF_UNSPEC;
+   ifsm->pad1 = 0;
+   ifsm->pad2 = 0;
    ifsm->ifindex = dev->ifindex;
    ifsm->filter_mask = filter_mask;
@@ -1654,6 +1654,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)

        sock_copy(newsk, sk);

+       newsk->sk_prot_creator = sk->sk_prot;
+
        /* SANITY */
        if (likely(newsk->sk_net_refcnt))
            get_net(sock_net(newsk));

@@ -1682,13 +1684,16 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)

        sock_reset_flag(newsk, SOCK_DONE);

-       filter = rcu_dereference_protected(newsk->sk_filter, 1);
+       rcu_read_lock();
+       filter = rcu_dereference(sk->sk_filter);
        if (filter != NULL)
            /* though it's an empty new sock, the charging may fail
             * if sysctl_optmem_max was changed between creation of
             * original socket and cloning
             */
            is_charged = sk_filter_charge(newsk, filter);
+       RCU_INIT_POINTER(newsk->sk_filter, filter);
+       rcu_read_unlock();

        if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
            /* We need to make sure that we don't uncharge the new
@@ -1301,28 +1301,33 @@ int dsa_slave_create(struct dsa_port *port, const char *name)
    p->old_duplex = -1;

    port->netdev = slave_dev;
-   ret = register_netdev(slave_dev);
-   if (ret) {
-       netdev_err(master, "error %d registering interface %s\n",
-                  ret, slave_dev->name);
-       port->netdev = NULL;
-       free_percpu(p->stats64);
-       free_netdev(slave_dev);
-       return ret;
-   }

    netif_carrier_off(slave_dev);

    ret = dsa_slave_phy_setup(p, slave_dev);
    if (ret) {
        netdev_err(master, "error %d setting up slave phy\n", ret);
-       unregister_netdev(slave_dev);
-       free_percpu(p->stats64);
-       free_netdev(slave_dev);
-       return ret;
+       goto out_free;
+   }
+
+   ret = register_netdev(slave_dev);
+   if (ret) {
+       netdev_err(master, "error %d registering interface %s\n",
+                  ret, slave_dev->name);
+       goto out_phy;
    }

    return 0;

+out_phy:
+   phy_disconnect(p->phy);
+   if (of_phy_is_fixed_link(p->dp->dn))
+       of_phy_deregister_fixed_link(p->dp->dn);
+out_free:
+   free_percpu(p->stats64);
+   free_netdev(slave_dev);
+   port->netdev = NULL;
+   return ret;
}

void dsa_slave_destroy(struct net_device *slave_dev)
@@ -128,9 +128,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
            break;
        }
        if (cmp == -1)
-           pp = &(*pp)->rb_left;
+           pp = &next->rb_left;
        else
-           pp = &(*pp)->rb_right;
+           pp = &next->rb_right;
    }
    *parent_p = parent;
    *pp_p = pp;
@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
    struct ip_tunnel *tunnel;
    struct erspanhdr *ershdr;
    const struct iphdr *iph;
-   __be32 session_id;
    __be32 index;
    int len;

@@ -275,8 +274,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
    /* The original GRE header does not have key field,
     * Use ERSPAN 10-bit session ID as key.
     */
-   session_id = cpu_to_be32(ntohs(ershdr->session_id));
-   tpi->key = session_id;
+   tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
    index = ershdr->md.index;
    tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
                              tpi->flags | TUNNEL_KEY,

@@ -733,7 +731,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
    if (skb_cow_head(skb, dev->needed_headroom))
        goto free_skb;

-   if (skb->len > dev->mtu) {
+   if (skb->len - dev->hard_header_len > dev->mtu) {
        pskb_trim(skb, dev->mtu);
        truncate = true;
    }

@@ -1223,6 +1221,7 @@ static int gre_tap_init(struct net_device *dev)
{
    __gre_tunnel_init(dev);
    dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+   netif_keep_dst(dev);

    return ip_tunnel_init(dev);
}

@@ -1246,13 +1245,16 @@ static int erspan_tunnel_init(struct net_device *dev)

    tunnel->tun_hlen = 8;
    tunnel->parms.iph.protocol = IPPROTO_GRE;
-   t_hlen = tunnel->hlen + sizeof(struct iphdr) + sizeof(struct erspanhdr);
+   tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+                  sizeof(struct erspanhdr);
+   t_hlen = tunnel->hlen + sizeof(struct iphdr);

    dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
    dev->mtu = ETH_DATA_LEN - t_hlen - 4;
    dev->features |= GRE_FEATURES;
    dev->hw_features |= GRE_FEATURES;
    dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+   netif_keep_dst(dev);

    return ip_tunnel_init(dev);
}
@@ -311,9 +311,10 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
    const struct iphdr *iph = ip_hdr(skb);
-   struct rtable *rt;
+   int (*edemux)(struct sk_buff *skb);
    struct net_device *dev = skb->dev;
-   void (*edemux)(struct sk_buff *skb);
+   struct rtable *rt;
+   int err;

    /* if ingress device is enslaved to an L3 master device pass the
     * skb to its handler for processing

@@ -331,7 +332,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)

        ipprot = rcu_dereference(inet_protos[protocol]);
        if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
-           edemux(skb);
+           err = edemux(skb);
+           if (unlikely(err))
+               goto drop_error;
            /* must reload iph, skb->head might have changed */
            iph = ip_hdr(skb);
        }

@@ -342,13 +345,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
     * how the packet travels inside Linux networking.
     */
    if (!skb_valid_dst(skb)) {
-       int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+       err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
                                   iph->tos, dev);
-       if (unlikely(err)) {
-           if (err == -EXDEV)
-               __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
-           goto drop;
-       }
+       if (unlikely(err))
+           goto drop_error;
    }

#ifdef CONFIG_IP_ROUTE_CLASSID

@@ -399,6 +399,11 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
drop:
    kfree_skb(skb);
    return NET_RX_DROP;
+
+drop_error:
+   if (err == -EXDEV)
+       __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
+   goto drop;
}

/*
@@ -168,6 +168,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
    struct ip_tunnel_parm *parms = &tunnel->parms;
    struct dst_entry *dst = skb_dst(skb);
    struct net_device *tdev;    /* Device to other host */
+   int pkt_len = skb->len;
    int err;
    int mtu;

@@ -229,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,

    err = dst_output(tunnel->net, skb->sk, skb);
    if (net_xmit_eval(err) == 0)
-       err = skb->len;
+       err = pkt_len;
    iptunnel_xmit_stats(dev, err);
    return NETDEV_TX_OK;
@@ -1520,43 +1520,56 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
EXPORT_SYMBOL(rt_dst_alloc);

/* called in rcu_read_lock() section */
-static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
-                             u8 tos, struct net_device *dev, int our)
+int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+                          u8 tos, struct net_device *dev,
+                          struct in_device *in_dev, u32 *itag)
{
-   struct rtable *rth;
-   struct in_device *in_dev = __in_dev_get_rcu(dev);
-   unsigned int flags = RTCF_MULTICAST;
-   u32 itag = 0;
    int err;

    /* Primary sanity checks. */
-
    if (!in_dev)
        return -EINVAL;

    if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
        skb->protocol != htons(ETH_P_IP))
-       goto e_inval;
+       return -EINVAL;

    if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
-       goto e_inval;
+       return -EINVAL;

    if (ipv4_is_zeronet(saddr)) {
        if (!ipv4_is_local_multicast(daddr))
-           goto e_inval;
+           return -EINVAL;
    } else {
        err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
-                                 in_dev, &itag);
+                                 in_dev, itag);
        if (err < 0)
-           goto e_err;
+           return err;
    }
+   return 0;
+}
+
+/* called in rcu_read_lock() section */
+static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+                             u8 tos, struct net_device *dev, int our)
+{
+   struct in_device *in_dev = __in_dev_get_rcu(dev);
+   unsigned int flags = RTCF_MULTICAST;
+   struct rtable *rth;
+   u32 itag = 0;
+   int err;
+
+   err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
+   if (err)
+       return err;
+
    if (our)
        flags |= RTCF_LOCAL;

    rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
                       IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
    if (!rth)
-       goto e_nobufs;
+       return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
    rth->dst.tclassid = itag;

@@ -1572,13 +1585,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,

    skb_dst_set(skb, &rth->dst);
    return 0;
-
-e_nobufs:
-   return -ENOBUFS;
-e_inval:
-   return -EINVAL;
-e_err:
-   return err;
}
@@ -1503,23 +1503,23 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

-void tcp_v4_early_demux(struct sk_buff *skb)
+int tcp_v4_early_demux(struct sk_buff *skb)
{
    const struct iphdr *iph;
    const struct tcphdr *th;
    struct sock *sk;

    if (skb->pkt_type != PACKET_HOST)
-       return;
+       return 0;

    if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
-       return;
+       return 0;

    iph = ip_hdr(skb);
    th = tcp_hdr(skb);

    if (th->doff < sizeof(struct tcphdr) / 4)
-       return;
+       return 0;

    sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
                                   iph->saddr, th->source,

@@ -1538,6 +1538,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
            skb_dst_set_noref(skb, dst);
        }
    }
+   return 0;
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -2221,9 +2221,10 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
    return NULL;
}

-void udp_v4_early_demux(struct sk_buff *skb)
+int udp_v4_early_demux(struct sk_buff *skb)
{
    struct net *net = dev_net(skb->dev);
+   struct in_device *in_dev = NULL;
    const struct iphdr *iph;
    const struct udphdr *uh;
    struct sock *sk = NULL;

@@ -2234,24 +2235,24 @@ void udp_v4_early_demux(struct sk_buff *skb)

    /* validate the packet */
    if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
-       return;
+       return 0;

    iph = ip_hdr(skb);
    uh = udp_hdr(skb);

    if (skb->pkt_type == PACKET_BROADCAST ||
        skb->pkt_type == PACKET_MULTICAST) {
-       struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+       in_dev = __in_dev_get_rcu(skb->dev);

        if (!in_dev)
-           return;
+           return 0;

        /* we are supposed to accept bcast packets */
        if (skb->pkt_type == PACKET_MULTICAST) {
            ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
                                   iph->protocol);
            if (!ours)
-               return;
+               return 0;
        }

        sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,

@@ -2263,7 +2264,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
    }

    if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
-       return;
+       return 0;

    skb->sk = sk;
    skb->destructor = sock_efree;

@@ -2272,12 +2273,23 @@ void udp_v4_early_demux(struct sk_buff *skb)
    if (dst)
        dst = dst_check(dst, 0);
    if (dst) {
+       u32 itag = 0;
+
        /* set noref for now.
         * any place which wants to hold dst has to call
         * dst_hold_safe()
         */
        skb_dst_set_noref(skb, dst);
+
+       /* for unconnected multicast sockets we need to validate
+        * the source on each packet
+        */
+       if (!inet_sk(sk)->inet_daddr && in_dev)
+           return ip_mc_validate_source(skb, iph->daddr,
+                                        iph->saddr, iph->tos,
+                                        skb->dev, in_dev, &itag);
    }
+   return 0;
}

int udp_rcv(struct sk_buff *skb)
@@ -1311,6 +1311,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
    dev->features |= NETIF_F_NETNS_LOCAL;
    dev->priv_flags &= ~IFF_TX_SKB_SHARING;
    dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+   netif_keep_dst(dev);
}

static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
@@ -1043,6 +1043,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
    struct dst_entry *dst = NULL, *ndst = NULL;
    struct net_device *tdev;
    int mtu;
+   unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
    unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
    unsigned int max_headroom = psh_hlen;
    bool use_cache = false;

@@ -1124,7 +1125,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
                  t->parms.name);
        goto tx_err_dst_release;
    }
-   mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
+   mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
    if (encap_limit >= 0) {
        max_headroom += 8;
        mtu -= 8;

@@ -1133,7 +1134,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
        mtu = IPV6_MIN_MTU;
    if (skb_dst(skb) && !t->parms.collect_md)
        skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-   if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
+   if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
        *pmtu = mtu;
        err = -EMSGSIZE;
        goto tx_err_dst_release;
@@ -445,6 +445,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
    struct dst_entry *dst = skb_dst(skb);
    struct net_device *tdev;
    struct xfrm_state *x;
+   int pkt_len = skb->len;
    int err = -1;
    int mtu;

@@ -502,7 +503,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&tstats->syncp);
-       tstats->tx_bytes += skb->len;
+       tstats->tx_bytes += pkt_len;
        tstats->tx_packets++;
        u64_stats_update_end(&tstats->syncp);
    } else {
@@ -1314,6 +1314,9 @@ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)

        hlist_del_init(&session->hlist);

+       if (test_and_set_bit(0, &session->dead))
+           goto again;
+
        if (session->ref != NULL)
            (*session->ref)(session);

@@ -1685,14 +1688,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);

/* This function is used by the netlink TUNNEL_DELETE command.
 */
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
+   if (!test_and_set_bit(0, &tunnel->dead)) {
        l2tp_tunnel_inc_refcount(tunnel);
-   if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
-       l2tp_tunnel_dec_refcount(tunnel);
-       return 1;
+       queue_work(l2tp_wq, &tunnel->del_work);
    }
-   return 0;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);

@@ -1750,6 +1751,9 @@ EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
 */
int l2tp_session_delete(struct l2tp_session *session)
{
+   if (test_and_set_bit(0, &session->dead))
+       return 0;
+
    if (session->ref)
        (*session->ref)(session);
    __l2tp_session_unhash(session);
@@ -76,6 +76,7 @@ struct l2tp_session_cfg {
struct l2tp_session {
    int magic;          /* should be
                         * L2TP_SESSION_MAGIC */
+   long dead;

    struct l2tp_tunnel *tunnel; /* back pointer to tunnel
                                 * context */

@@ -160,6 +161,9 @@ struct l2tp_tunnel_cfg {

struct l2tp_tunnel {
    int magic;          /* Should be L2TP_TUNNEL_MAGIC */
+
+   unsigned long dead;
+
    struct rcu_head rcu;
    rwlock_t hlist_lock;    /* protect session_hlist */
    bool acpt_newsess;      /* Indicates whether this

@@ -254,7 +258,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
                       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
                       struct l2tp_tunnel **tunnelp);
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
struct l2tp_session *l2tp_session_create(int priv_size,
                                         struct l2tp_tunnel *tunnel,
                                         u32 session_id, u32 peer_session_id,
@@ -44,7 +44,6 @@ struct l2tp_eth {
    struct net_device *dev;
    struct sock *tunnel_sock;
    struct l2tp_session *session;
-   struct list_head list;
    atomic_long_t tx_bytes;
    atomic_long_t tx_packets;
    atomic_long_t tx_dropped;

@@ -58,17 +57,6 @@ struct l2tp_eth_sess {
    struct net_device *dev;
};

-/* per-net private data for this module */
-static unsigned int l2tp_eth_net_id;
-struct l2tp_eth_net {
-   struct list_head l2tp_eth_dev_list;
-   spinlock_t l2tp_eth_lock;
-};
-
-static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
-{
-   return net_generic(net, l2tp_eth_net_id);
-}
-
static int l2tp_eth_dev_init(struct net_device *dev)
{

@@ -84,12 +72,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)

static void l2tp_eth_dev_uninit(struct net_device *dev)
{
-   struct l2tp_eth *priv = netdev_priv(dev);
-   struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
-
-   spin_lock(&pn->l2tp_eth_lock);
-   list_del_init(&priv->list);
-   spin_unlock(&pn->l2tp_eth_lock);
    dev_put(dev);
}

@@ -273,7 +255,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
    struct l2tp_eth *priv;
    struct l2tp_eth_sess *spriv;
    int rc;
-   struct l2tp_eth_net *pn;

    if (cfg->ifname) {
        strlcpy(name, cfg->ifname, IFNAMSIZ);

@@ -305,7 +286,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
    priv = netdev_priv(dev);
    priv->dev = dev;
    priv->session = session;
-   INIT_LIST_HEAD(&priv->list);

    priv->tunnel_sock = tunnel->sock;
    session->recv_skb = l2tp_eth_dev_recv;

@@ -326,10 +306,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
    strlcpy(session->ifname, dev->name, IFNAMSIZ);

    dev_hold(dev);
-   pn = l2tp_eth_pernet(dev_net(dev));
-   spin_lock(&pn->l2tp_eth_lock);
-   list_add(&priv->list, &pn->l2tp_eth_dev_list);
-   spin_unlock(&pn->l2tp_eth_lock);

    return 0;

@@ -342,22 +318,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
    return rc;
}

-static __net_init int l2tp_eth_init_net(struct net *net)
-{
-   struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
-
-   INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
-   spin_lock_init(&pn->l2tp_eth_lock);
-
-   return 0;
-}
-
-static struct pernet_operations l2tp_eth_net_ops = {
-   .init = l2tp_eth_init_net,
-   .id   = &l2tp_eth_net_id,
-   .size = sizeof(struct l2tp_eth_net),
-};
-

static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
    .session_create = l2tp_eth_create,

@@ -371,25 +331,18 @@ static int __init l2tp_eth_init(void)

    err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
    if (err)
-       goto out;
-
-   err = register_pernet_device(&l2tp_eth_net_ops);
-   if (err)
-       goto out_unreg;
+       goto err;

    pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");

    return 0;

-out_unreg:
-   l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
-out:
+err:
    return err;
}

static void __exit l2tp_eth_exit(void)
{
-   unregister_pernet_device(&l2tp_eth_net_ops);
    l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
}
@@ -437,12 +437,12 @@ static void pppol2tp_session_close(struct l2tp_session *session)

    BUG_ON(session->magic != L2TP_SESSION_MAGIC);

-   if (sock) {
+   if (sock)
        inet_shutdown(sock, SEND_SHUTDOWN);
-
-       /* Don't let the session go away before our socket does */
-       l2tp_session_inc_refcount(session);
-   }
}

/* Really kill the session socket. (Called from sock_put() if
 * refcnt == 0.)
@@ -2270,10 +2270,13 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,

    mutex_unlock(nlk->cb_mutex);

+   ret = 0;
    if (cb->start)
-       cb->start(cb);
+       ret = cb->start(cb);
+
+   if (!ret)
        ret = netlink_dump(sk);

    sock_put(sk);

    if (ret)
@@ -2840,6 +2840,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
    struct virtio_net_hdr vnet_hdr = { 0 };
    int offset = 0;
    struct packet_sock *po = pkt_sk(sk);
+   bool has_vnet_hdr = false;
    int hlen, tlen, linear;
    int extra_len = 0;

@@ -2883,6 +2884,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
        if (err)
            goto out_unlock;
+       has_vnet_hdr = true;
    }

    if (unlikely(sock_flag(sk, SOCK_NOFCS))) {

@@ -2941,7 +2943,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
    skb->priority = sk->sk_priority;
    skb->mark = sockc.mark;

-   if (po->has_vnet_hdr) {
+   if (has_vnet_hdr) {
        err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
        if (err)
            goto out_free;

@@ -3069,13 +3071,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
    int ret = 0;
    bool unlisted = false;

-   if (po->fanout)
-       return -EINVAL;
-
    lock_sock(sk);
    spin_lock(&po->bind_lock);
    rcu_read_lock();

+   if (po->fanout) {
+       ret = -EINVAL;
+       goto out_unlock;
+   }
+
    if (name) {
        dev = dev_get_by_name_rcu(sock_net(sk), name);
        if (!dev) {
@@ -463,6 +463,7 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
        .r = r,
        .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
    };
+   int pos = cb->args[2];

    /* eps hashtable dumps
     * args:

@@ -493,7 +494,8 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
        goto done;

    sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
-               net, (int *)&cb->args[2], &commp);
+               net, &pos, &commp);
+   cb->args[2] = pos;

done:
    cb->args[1] = cb->args[4];
@@ -551,7 +551,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
        return false;
    if (msg_errcode(msg))
        return false;
-   *err = -TIPC_ERR_NO_NAME;
+   *err = TIPC_ERR_NO_NAME;
    if (skb_linearize(skb))
        return false;
    msg = buf_msg(skb);