mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) MODULE_FIRMWARE firmware string not correct for iwlwifi 8000 chips, from Sara Sharon.

 2) Fix SKB size checks in batman-adv stack on receive, from Sven Eckelmann.

 3) Leak fix on mac80211 interface add error paths, from Johannes Berg.

 4) Cannot invoke napi_disable() with BH disabled in myri10ge driver, fix from Stanislaw Gruszka.

 5) Fix sign extension problem when computing feature masks in net_gso_ok(), from Marcelo Ricardo Leitner.

 6) lan78xx driver doesn't count packets and packet lengths in its statistics properly, fix from Woojung Huh.

 7) Fix the buffer allocation sizes in pegasus USB driver, from Petko Manolov.

 8) Fix refcount overflows in bpf, from Alexei Starovoitov.

 9) Unified dst cache handling introduced a preempt warning in ip_tunnel, fix by resetting rather than setting the cached route. From Paolo Abeni.

10) Listener hash collision test fix in soreuseport, from Craig Gallak

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (47 commits)
  gre: do not pull header in ICMP error processing
  net: Implement net_dbg_ratelimited() for CONFIG_DYNAMIC_DEBUG case
  tipc: only process unicast on intended node
  cxgb3: fix out of bounds read
  net/smscx5xx: use the device tree for mac address
  soreuseport: Fix TCP listener hash collision
  net: l2tp: fix reversed udp6 checksum flags
  ip_tunnel: fix preempt warning in ip tunnel creation/updating
  samples/bpf: fix trace_output example
  bpf: fix check_map_func_compatibility logic
  bpf: fix refcnt overflow
  drivers: net: cpsw: use of_phy_connect() in fixed-link case
  dt: cpsw: phy-handle, phy_id, and fixed-link are mutually exclusive
  drivers: net: cpsw: don't ignore phy-mode if phy-handle is used
  drivers: net: cpsw: fix segfault in case of bad phy-handle
  drivers: net: cpsw: fix parsing of phy-handle DT property in dual_emac config
  MAINTAINERS: net: Change maintainer for GRETH 10/100/1G Ethernet MAC device driver
  gre: reject GUE and FOU in collect metadata mode
  pegasus: fixes reported packet length
  pegasus: fixes URB buffer allocation size;
  ...
commit 9c5d1bc2b7
@@ -45,13 +45,13 @@ Required properties:
 Optional properties:
 - dual_emac_res_vlan : Specifies VID to be used to segregate the ports
 - mac-address : See ethernet.txt file in the same directory
-- phy_id : Specifies slave phy id
+- phy_id : Specifies slave phy id (deprecated, use phy-handle)
 - phy-handle : See ethernet.txt file in the same directory

 Slave sub-nodes:
 - fixed-link : See fixed-link.txt file in the same directory
-Either the property phy_id, or the sub-node
-fixed-link can be specified
+Note: Exactly one of phy_id, phy-handle, or fixed-link must be specified.

 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
@@ -6,7 +6,7 @@ This is the driver for the Altera Triple-Speed Ethernet (TSE) controllers
 using the SGDMA and MSGDMA soft DMA IP components. The driver uses the
 platform bus to obtain component resources. The designs used to test this
 driver were built for a Cyclone(R) V SOC FPGA board, a Cyclone(R) V FPGA board,
-and tested with ARM and NIOS processor hosts seperately. The anticipated use
+and tested with ARM and NIOS processor hosts separately. The anticipated use
 cases are simple communications between an embedded system and an external peer
 for status and simple configuration of the embedded system.

@@ -65,14 +65,14 @@ Driver parameters can be also passed in command line by using:
 4.1) Transmit process
 When the driver's transmit routine is called by the kernel, it sets up a
 transmit descriptor by calling the underlying DMA transmit routine (SGDMA or
-MSGDMA), and initites a transmit operation. Once the transmit is complete, an
+MSGDMA), and initiates a transmit operation. Once the transmit is complete, an
 interrupt is driven by the transmit DMA logic. The driver handles the transmit
 completion in the context of the interrupt handling chain by recycling
 resource required to send and track the requested transmit operation.

 4.2) Receive process
 The driver will post receive buffers to the receive DMA logic during driver
-intialization. Receive buffers may or may not be queued depending upon the
+initialization. Receive buffers may or may not be queued depending upon the
 underlying DMA logic (MSGDMA is able queue receive buffers, SGDMA is not able
 to queue receive buffers to the SGDMA receive logic). When a packet is
 received, the DMA logic generates an interrupt. The driver handles a receive
@@ -8,7 +8,7 @@ Initial Release:
 This is conceptually very similar to the macvlan driver with one major
 exception of using L3 for mux-ing /demux-ing among slaves. This property makes
 the master device share the L2 with it's slave devices. I have developed this
-driver in conjuntion with network namespaces and not sure if there is use case
+driver in conjunction with network namespaces and not sure if there is use case
 outside of it.


@@ -42,7 +42,7 @@ out. In this mode the slaves will RX/TX multicast and broadcast (if applicable)
 as well.

 4.2 L3 mode:
-In this mode TX processing upto L3 happens on the stack instance attached
+In this mode TX processing up to L3 happens on the stack instance attached
 to the slave device and packets are switched to the stack instance of the
 master device for the L2 processing and routing from that instance will be
 used before packets are queued on the outbound device. In this mode the slaves

@@ -56,7 +56,7 @@ situations defines your use case then you can choose to use ipvlan -
 (a) The Linux host that is connected to the external switch / router has
 policy configured that allows only one mac per port.
 (b) No of virtual devices created on a master exceed the mac capacity and
-puts the NIC in promiscous mode and degraded performance is a concern.
+puts the NIC in promiscuous mode and degraded performance is a concern.
 (c) If the slave device is to be put into the hostile / untrusted network
 namespace where L2 on the slave could be changed / misused.
@@ -67,12 +67,12 @@ The two basic thread commands are:
 * add_device DEVICE@NAME -- adds a single device
 * rem_device_all -- remove all associated devices

-When adding a device to a thread, a corrosponding procfile is created
+When adding a device to a thread, a corresponding procfile is created
 which is used for configuring this device. Thus, device names need to
 be unique.

 To support adding the same device to multiple threads, which is useful
-with multi queue NICs, a the device naming scheme is extended with "@":
+with multi queue NICs, the device naming scheme is extended with "@":
 device@something

 The part after "@" can be anything, but it is custom to use the thread

@@ -221,7 +221,7 @@ Sample scripts

 A collection of tutorial scripts and helpers for pktgen is in the
 samples/pktgen directory. The helper parameters.sh file support easy
-and consistant parameter parsing across the sample scripts.
+and consistent parameter parsing across the sample scripts.

 Usage example and help:
 ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
@@ -41,7 +41,7 @@ using an rx_handler which gives the impression that packets flow through
 the VRF device. Similarly on egress routing rules are used to send packets
 to the VRF device driver before getting sent out the actual interface. This
 allows tcpdump on a VRF device to capture all packets into and out of the
-VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied
+VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
 using the VRF device to specify rules that apply to the VRF domain as a whole.

 [1] Packets in the forwarded state do not flow through the device, so those
@@ -4,7 +4,7 @@ Krisztian <hidden@balabit.hu> and others and additional patches
 from Jamal <hadi@cyberus.ca>.

 The end goal for syncing is to be able to insert attributes + generate
-events so that the an SA can be safely moved from one machine to another
+events so that the SA can be safely moved from one machine to another
 for HA purposes.
 The idea is to synchronize the SA so that the takeover machine can do
 the processing of the SA as accurate as possible if it has access to it.

@@ -13,7 +13,7 @@ We already have the ability to generate SA add/del/upd events.
 These patches add ability to sync and have accurate lifetime byte (to
 ensure proper decay of SAs) and replay counters to avoid replay attacks
 with as minimal loss at failover time.
-This way a backup stays as closely uptodate as an active member.
+This way a backup stays as closely up-to-date as an active member.

 Because the above items change for every packet the SA receives,
 it is possible for a lot of the events to be generated.

@@ -163,7 +163,7 @@ If you have an SA that is getting hit by traffic in bursts such that
 there is a period where the timer threshold expires with no packets
 seen, then an odd behavior is seen as follows:
 The first packet arrival after a timer expiry will trigger a timeout
-aevent; i.e we dont wait for a timeout period or a packet threshold
+event; i.e we don't wait for a timeout period or a packet threshold
 to be reached. This is done for simplicity and efficiency reasons.

 -JHS
@@ -4903,7 +4903,7 @@ F: net/ipv4/gre_offload.c
 F: include/net/gre.h

 GRETH 10/100/1G Ethernet MAC device driver
-M: Kristoffer Glembo <kristoffer@gaisler.com>
+M: Andreas Larsson <andreas@gaisler.com>
 L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/ethernet/aeroflex/

@@ -10014,7 +10014,8 @@ F: drivers/infiniband/hw/ocrdma/

 SFC NETWORK DRIVER
 M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M: Shradha Shah <sshah@solarflare.com>
+M: Edward Cree <ecree@solarflare.com>
+M: Bert Kenward <bkenward@solarflare.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/sfc/
@@ -2181,7 +2181,7 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 			       struct net_device *bridge)
 {
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int i, err;
+	int i, err = 0;

	mutex_lock(&ps->smi_mutex);
@@ -581,12 +581,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
+	unsigned int offset = 0;

-	page = alloc_page(gfp);
-	if (!page)
-		return -ENOMEM;
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		page = rxr->rx_page;
+		if (!page) {
+			page = alloc_page(gfp);
+			if (!page)
+				return -ENOMEM;
+			rxr->rx_page = page;
+			rxr->rx_page_offset = 0;
+		}
+		offset = rxr->rx_page_offset;
+		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+		if (rxr->rx_page_offset == PAGE_SIZE)
+			rxr->rx_page = NULL;
+		else
+			get_page(page);
+	} else {
+		page = alloc_page(gfp);
+		if (!page)
+			return -ENOMEM;
+	}

-	mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
@@ -601,6 +619,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
+	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;

@@ -642,6 +661,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
+		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

@@ -709,7 +729,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
-		skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+		skb_fill_page_desc(skb, i, cons_rx_buf->page,
+				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate

@@ -740,7 +761,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
			return NULL;
		}

-		dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
@@ -1584,13 +1605,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)

			dma_unmap_page(&pdev->dev,
				       dma_unmap_addr(rx_agg_buf, mapping),
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
+		if (rxr->rx_page) {
+			__free_page(rxr->rx_page);
+			rxr->rx_page = NULL;
+		}
	}
 }

@@ -1973,7 +1998,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

-	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

	bnxt_init_rxbd_pages(ring, type);

@@ -2164,7 +2189,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
		bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
-		agg_factor = 4;
+		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE) {

@@ -3020,12 +3045,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
		/* Number of segs are log2 units, and first packet is not
		 * included as part of this units.
		 */
-		if (mss <= PAGE_SIZE) {
-			n = PAGE_SIZE / mss;
+		if (mss <= BNXT_RX_PAGE_SIZE) {
+			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
-			n = mss / PAGE_SIZE;
-			if (mss & (PAGE_SIZE - 1))
+			n = mss / BNXT_RX_PAGE_SIZE;
+			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

@@ -4309,7 +4334,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
	if (bp->flags & BNXT_FLAG_MSIX_CAP)
		rc = bnxt_setup_msix(bp);

-	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
		/* fallback to INTA */
		rc = bnxt_setup_inta(bp);
	}
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {

 #define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)

+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
 #define BNXT_MIN_PKT_SIZE 45

 #define BNXT_NUM_TESTS(bp) 0

@@ -506,6 +515,7 @@ struct bnxt_sw_rx_bd {

 struct bnxt_sw_rx_agg_bd {
	struct page *page;
+	unsigned int offset;
	dma_addr_t mapping;
 };

@@ -586,6 +596,9 @@ struct bnxt_rx_ring_info {
	unsigned long *rx_agg_bmap;
	u16 rx_agg_bmap_size;

+	struct page *rx_page;
+	unsigned int rx_page_offset;
+
	dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
	dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
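For context on the new BNXT_RX_PAGE_SHIFT cap: the RX buffer descriptor carries a 16-bit length, so an aggregation buffer cannot be a full 64K page, and the largest power-of-two page that fits is 32K (shift 15). A minimal userspace sketch of the same clamp follows; the macro name and PAGE_SHIFT values are illustrative stand-ins, not taken from the kernel headers.

```c
#include <stdio.h>

/* Illustrative stand-in: a 16-bit length cannot hold 64K, so the largest
 * power-of-two page size that fits in an RX buffer descriptor is 32K.
 */
#define MAX_RXBD_SHIFT 15

static unsigned int rx_page_size(unsigned int page_shift)
{
	/* Mirrors the "#if (PAGE_SHIFT > 15)" clamp in the header hunk above. */
	unsigned int rx_shift = page_shift > MAX_RXBD_SHIFT ? MAX_RXBD_SHIFT
							    : page_shift;
	return 1U << rx_shift;
}

int main(void)
{
	/* 4K pages keep their size; 64K pages are clamped to 32K for RX BDs. */
	printf("PAGE_SHIFT=12 -> RX page size %u\n", rx_page_size(12));
	printf("PAGE_SHIFT=16 -> RX page size %u\n", rx_page_size(16));
	return 0;
}
```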
@@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap)
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
-	u16 rspq_map[RSS_TABLE_SIZE];
+	u16 rspq_map[RSS_TABLE_SIZE + 1];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;

@@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap)
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
+	rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
@@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev)

	del_timer_sync(&mgp->watchdog_timer);
	mgp->running = MYRI10GE_ETH_STOPPING;
-	local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
	for (i = 0; i < mgp->num_slices; i++) {
		napi_disable(&mgp->ss[i].napi);
+		local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
		/* Lock the slice to prevent the busy_poll handler from
		 * accessing it. Later when we bring the NIC up, myri10ge_open
		 * resets the slice including this lock.

@@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev)
			pr_info("Slice %d locked\n", i);
			mdelay(1);
		}
+		local_bh_enable();
	}
-	local_bh_enable();
	netif_carrier_off(dev);

	netif_tx_stop_all_queues(dev);
@@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
		return 0;
	}

+	if (nic_data->datapath_caps &
+	    1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+		return -EOPNOTSUPP;
+
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       nic_data->vport_id);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);

@@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
				      bool replacing)
 {
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	u32 flags = spec->flags;

	memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);

+	/* Remove RSS flag if we don't have an RSS context. */
+	if (flags & EFX_FILTER_FLAG_RX_RSS &&
+	    spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+	    nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+		flags &= ~EFX_FILTER_FLAG_RX_RSS;
+
	if (replacing) {
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       MC_CMD_FILTER_OP_IN_OP_REPLACE);

@@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
		       0 : spec->dmaq_id);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
-		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+		       (flags & EFX_FILTER_FLAG_RX_RSS) ?
		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
-	if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+	if (flags & EFX_FILTER_FLAG_RX_RSS)
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
			       spec->rss_context !=
			       EFX_FILTER_RSS_CONTEXT_DEFAULT ?
@@ -367,7 +367,6 @@ struct cpsw_priv {
	spinlock_t lock;
	struct platform_device *pdev;
	struct net_device *ndev;
-	struct device_node *phy_node;
	struct napi_struct napi_rx;
	struct napi_struct napi_tx;
	struct device *dev;
@@ -1148,25 +1147,34 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

-	if (priv->phy_node)
-		slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+	if (slave->data->phy_node) {
+		slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
-	else
+		if (!slave->phy) {
+			dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
+				slave->data->phy_node->full_name,
+				slave->slave_num);
+			return;
+		}
+	} else {
		slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
-	if (IS_ERR(slave->phy)) {
-		dev_err(priv->dev, "phy %s not found on slave %d\n",
-			slave->data->phy_id, slave->slave_num);
-		slave->phy = NULL;
-	} else {
-		phy_attached_info(slave->phy);
-
-		phy_start(slave->phy);
-
-		/* Configure GMII_SEL register */
-		cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
-			     slave->slave_num);
+		if (IS_ERR(slave->phy)) {
+			dev_err(priv->dev,
+				"phy \"%s\" not found on slave %d, err %ld\n",
+				slave->data->phy_id, slave->slave_num,
+				PTR_ERR(slave->phy));
+			slave->phy = NULL;
+			return;
+		}
	}
+
+	phy_attached_info(slave->phy);
+
+	phy_start(slave->phy);
+
+	/* Configure GMII_SEL register */
+	cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
 }

 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
@@ -1940,12 +1948,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
		slave->port_vlan = data->dual_emac_res_vlan;
 }

-static int cpsw_probe_dt(struct cpsw_priv *priv,
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
 {
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
-	struct cpsw_platform_data *data = &priv->data;
	int i = 0, ret;
	u32 prop;

@@ -2033,25 +2040,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
		if (strcmp(slave_node->name, "slave"))
			continue;

-		priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+		slave_data->phy_node = of_parse_phandle(slave_node,
+							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
-		if (of_phy_is_fixed_link(slave_node)) {
-			struct device_node *phy_node;
-			struct phy_device *phy_dev;
+		if (slave_data->phy_node) {
+			dev_dbg(&pdev->dev,
+				"slave[%d] using phy-handle=\"%s\"\n",
+				i, slave_data->phy_node->full_name);
+		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret)
				return ret;
-			phy_node = of_node_get(slave_node);
-			phy_dev = of_phy_find_device(phy_node);
-			if (!phy_dev)
-				return -ENODEV;
-			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-				 PHY_ID_FMT, phy_dev->mdio.bus->id,
-				 phy_dev->mdio.addr);
+			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
@@ -2072,7 +2075,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
		} else {
-			dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
+			dev_err(&pdev->dev,
+				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
+				i);
			goto no_phy_slave;
		}
		slave_data->phy_if = of_get_phy_mode(slave_node);

@@ -2275,7 +2280,7 @@ static int cpsw_probe(struct platform_device *pdev)
	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

-	if (cpsw_probe_dt(priv, pdev)) {
+	if (cpsw_probe_dt(&priv->data, pdev)) {
		dev_err(&pdev->dev, "cpsw: platform data missing\n");
		ret = -ENODEV;
		goto clean_runtime_disable_ret;
@@ -18,6 +18,7 @@
 #include <linux/phy.h>

 struct cpsw_slave_data {
+	struct device_node *phy_node;
	char phy_id[MII_BUS_ID_SIZE];
	int phy_if;
	u8 mac_addr[ETH_ALEN];
@@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)

	/* TODO: Add phy read and write and private statistics get feature */

-	return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+	if (priv->phydev)
+		return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+	else
+		return -EOPNOTSUPP;
 }

 static int match_first_device(struct device *dev, void *data)
@@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
			continue;

		/* copy hw scan info */
-		memcpy(target->hwinfo, scan_info, scan_info->size);
+		memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
		target->essid_len = strnlen(scan_info->essid,
					    sizeof(scan_info->essid));
		target->rate_len = 0;
@@ -359,27 +359,25 @@ static void at803x_link_change_notify(struct phy_device *phydev)
	 * in the FIFO. In such cases, the FIFO enters an error mode it
	 * cannot recover from by software.
	 */
-	if (phydev->drv->phy_id == ATH8030_PHY_ID) {
-		if (phydev->state == PHY_NOLINK) {
-			if (priv->gpiod_reset && !priv->phy_reset) {
-				struct at803x_context context;
+	if (phydev->state == PHY_NOLINK) {
+		if (priv->gpiod_reset && !priv->phy_reset) {
+			struct at803x_context context;

			at803x_context_save(phydev, &context);

			gpiod_set_value(priv->gpiod_reset, 1);
			msleep(1);
			gpiod_set_value(priv->gpiod_reset, 0);
			msleep(1);

			at803x_context_restore(phydev, &context);

			phydev_dbg(phydev, "%s(): phy was reset\n",
				   __func__);
			priv->phy_reset = true;
-			}
-		} else {
-			priv->phy_reset = false;
		}
+	} else {
+		priv->phy_reset = false;
	}
 }
@@ -391,7 +389,6 @@ static struct phy_driver at803x_driver[] = {
	.phy_id_mask = 0xffffffef,
	.probe = at803x_probe,
	.config_init = at803x_config_init,
-	.link_change_notify = at803x_link_change_notify,
	.set_wol = at803x_set_wol,
	.get_wol = at803x_get_wol,
	.suspend = at803x_suspend,

@@ -427,7 +424,6 @@ static struct phy_driver at803x_driver[] = {
	.phy_id_mask = 0xffffffef,
	.probe = at803x_probe,
	.config_init = at803x_config_init,
-	.link_change_notify = at803x_link_change_notify,
	.set_wol = at803x_set_wol,
	.get_wol = at803x_get_wol,
	.suspend = at803x_suspend,
@@ -269,6 +269,7 @@ struct skb_data { /* skb->cb is one of these */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
+	int num_of_packet;
 };

 struct usb_context {

@@ -1803,7 +1804,34 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)

 static void lan78xx_link_status_change(struct net_device *net)
 {
-	/* nothing to do */
+	struct phy_device *phydev = net->phydev;
+	int ret, temp;
+
+	/* At forced 100 F/H mode, chip may fail to set mode correctly
+	 * when cable is switched between long(~50+m) and short one.
+	 * As workaround, set to 10 before setting to 100
+	 * at forced 100 F/H mode.
+	 */
+	if (!phydev->autoneg && (phydev->speed == 100)) {
+		/* disable phy interrupt */
+		temp = phy_read(phydev, LAN88XX_INT_MASK);
+		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+		temp = phy_read(phydev, MII_BMCR);
+		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+		temp |= BMCR_SPEED100;
+		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+		/* clear pending interrupt generated while workaround */
+		temp = phy_read(phydev, LAN88XX_INT_STS);
+
+		/* enable phy interrupt back */
+		temp = phy_read(phydev, LAN88XX_INT_MASK);
+		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+	}
 }

 static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2464,7 +2492,7 @@ static void tx_complete(struct urb *urb)
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
-		dev->net->stats.tx_packets++;
+		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

@@ -2681,10 +2709,11 @@ void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
		return;
	}

-	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

+	skb->protocol = eth_type_trans(skb, dev->net);
+
	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

@@ -2934,13 +2963,16 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)

	skb_totallen = 0;
	pkt_cnt = 0;
+	count = 0;
+	length = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
-			length = skb->len;
+			count = 1;
+			length = skb->len - TX_OVERHEAD;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}
@@ -2961,14 +2993,13 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
+			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

-	length = skb_totallen;

 gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {

@@ -2980,6 +3011,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
+	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
@@ -411,7 +411,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
	int ret;

	read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
-	data[0] = 0xc9;
+	data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
	data[1] = 0;
	if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
		data[1] |= 0x20; /* set full duplex */

@@ -497,7 +497,7 @@ static void read_bulk_callback(struct urb *urb)
		pkt_len = buf[count - 3] << 8;
		pkt_len += buf[count - 4];
		pkt_len &= 0xfff;
-		pkt_len -= 8;
+		pkt_len -= 4;
	}

	/*

@@ -528,7 +528,7 @@ static void read_bulk_callback(struct urb *urb)
 goon:
	usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
			  usb_rcvbulkpipe(pegasus->usb, 1),
-			  pegasus->rx_skb->data, PEGASUS_MTU + 8,
+			  pegasus->rx_skb->data, PEGASUS_MTU,
			  read_bulk_callback, pegasus);
	rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
	if (rx_status == -ENODEV)

@@ -569,7 +569,7 @@ static void rx_fixup(unsigned long data)
	}
	usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
			  usb_rcvbulkpipe(pegasus->usb, 1),
-			  pegasus->rx_skb->data, PEGASUS_MTU + 8,
+			  pegasus->rx_skb->data, PEGASUS_MTU,
			  read_bulk_callback, pegasus);
 try_again:
	status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);

@@ -823,7 +823,7 @@ static int pegasus_open(struct net_device *net)

	usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
			  usb_rcvbulkpipe(pegasus->usb, 1),
-			  pegasus->rx_skb->data, PEGASUS_MTU + 8,
+			  pegasus->rx_skb->data, PEGASUS_MTU,
			  read_bulk_callback, pegasus);
	if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
		if (res == -ENODEV)
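For readers following the packet-length change above: the reported length is assembled from the two status bytes at the end of the RX buffer, masked to 12 bits, and includes the trailing CRC, which is why the driver now subtracts 4 rather than 8. A standalone sketch of that parsing follows; the buffer layout and the 12-bit/CRC interpretation are taken only from the hunk above, not from the hardware documentation, and the helper name is hypothetical.

```c
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Extract the payload length from a Pegasus-style RX buffer whose trailing
 * status bytes carry the frame length, as in the hunk above. The 12-bit
 * length field covers the frame plus its 4-byte FCS, so the FCS is stripped.
 */
static int rx_payload_len(const uint8_t *buf, size_t count)
{
	uint16_t pkt_len;

	if (count < 4)
		return -1;
	pkt_len = (uint16_t)(buf[count - 3] << 8);
	pkt_len += buf[count - 4];
	pkt_len &= 0xfff;		/* length field is 12 bits wide */
	return (int)pkt_len - 4;	/* drop the trailing CRC, not 8 bytes */
}

int main(void)
{
	/* Hypothetical buffer: 64-byte payload, status bytes encode 68 (64 + FCS). */
	uint8_t buf[72] = {0};

	buf[68] = 68 & 0xff;		/* low byte sits at count - 4 */
	buf[69] = (68 >> 8) & 0x0f;	/* high nibble sits at count - 3 */
	printf("payload length: %d\n", rx_payload_len(buf, sizeof(buf)));
	return 0;
}
```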
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc75xx.h"

 #define SMSC_CHIPNAME "smsc75xx"

@@ -761,6 +762,15 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)

 static void smsc75xx_init_mac_address(struct usbnet *dev)
 {
+	const u8 *mac_addr;
+
+	/* maybe the boot loader passed the MAC address in devicetree */
+	mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+	if (mac_addr) {
+		memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+		return;
+	}
+
	/* try reading mac address from EEPROM */
	if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
			dev->net->dev_addr) == 0) {

@@ -772,7 +782,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
		}
	}

-	/* no eeprom, or eeprom values are invalid. generate random MAC */
+	/* no useful static MAC address found. generate a random one */
	eth_hw_addr_random(dev->net);
	netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc95xx.h"

 #define SMSC_CHIPNAME "smsc95xx"

@@ -765,6 +766,15 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)

 static void smsc95xx_init_mac_address(struct usbnet *dev)
 {
+	const u8 *mac_addr;
+
+	/* maybe the boot loader passed the MAC address in devicetree */
+	mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+	if (mac_addr) {
+		memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+		return;
+	}
+
	/* try reading mac address from EEPROM */
	if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
			dev->net->dev_addr) == 0) {

@@ -775,7 +785,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
		}
	}

-	/* no eeprom, or eeprom values are invalid. generate random MAC */
+	/* no useful static MAC address found. generate a random one */
	eth_hw_addr_random(dev->net);
	netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
	};
	static const int inc[4] = { 0, 100, 0, 0 };

+	memset(&mask_m, 0, sizeof(int8_t) * 123);
+	memset(&mask_p, 0, sizeof(int8_t) * 123);
+
	cur_bin = -6000;
	upper = bin + 100;
	lower = bin - 100;

@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
	int tmp, new;
	int i;

-	int8_t mask_m[123];
-	int8_t mask_p[123];
	int cur_bb_spur;
	bool is2GHz = IS_CHAN_2GHZ(chan);

-	memset(&mask_m, 0, sizeof(int8_t) * 123);
-	memset(&mask_p, 0, sizeof(int8_t) * 123);
-
	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
		if (AR_NO_SPUR == cur_bb_spur)

@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
	int i;
	struct chan_centers centers;

-	int8_t mask_m[123];
-	int8_t mask_p[123];
	int cur_bb_spur;
	bool is2GHz = IS_CHAN_2GHZ(chan);

-	memset(&mask_m, 0, sizeof(int8_t) * 123);
-	memset(&mask_p, 0, sizeof(int8_t) * 123);
-
	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;
@@ -93,7 +93,7 @@
 #define IWL8260_SMEM_OFFSET 0x400000
 #define IWL8260_SMEM_LEN 0x68000

-#define IWL8000_FW_PRE "iwlwifi-8000"
+#define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
	IWL8000_FW_PRE "-" __stringify(api) ".ucode"

@@ -238,19 +238,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
	snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
		 name_pre, tag);

-	/*
-	 * Starting 8000B - FW name format has changed. This overwrites the
-	 * previous name and uses the new format.
-	 */
-	if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-		char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
-
-		if (rev_step != 'A')
-			snprintf(drv->firmware_name,
-				 sizeof(drv->firmware_name), "%s%c-%s.ucode",
-				 name_pre, rev_step, tag);
-	}
-
	IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
		       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
		       ? "EXPERIMENTAL " : "",
@@ -1060,11 +1047,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			return -EINVAL;
		}

-	if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-		 !gscan_capa,
-		 "GSCAN is supported but capabilities TLV is unavailable\n"))
+	/*
+	 * If ucode advertises that it supports GSCAN but GSCAN
+	 * capabilities TLV is not present, or if it has an old format,
+	 * warn and continue without GSCAN.
+	 */
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+	    !gscan_capa) {
+		IWL_DEBUG_INFO(drv,
+			       "GSCAN is supported but capabilities TLV is unavailable\n");
		__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
			    capa->_capa);
+	}

	return 0;
@@ -526,7 +526,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;

	/* Make room for fw's virtual image pages, if it exists */
-	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+	    mvm->fw_paging_db[0].fw_paging_block)
		file_len += mvm->num_of_paging_blk *
			(sizeof(*dump_data) +
			 sizeof(struct iwl_fw_error_dump_paging) +

@@ -643,7 +644,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
	}

	/* Dump fw's virtual image */
-	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
+	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+	    mvm->fw_paging_db[0].fw_paging_block) {
		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
			struct iwl_fw_error_dump_paging *paging;
			struct page *pages =
@@ -144,9 +144,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
 
         __free_pages(mvm->fw_paging_db[i].fw_paging_block,
                  get_order(mvm->fw_paging_db[i].fw_paging_size));
+        mvm->fw_paging_db[i].fw_paging_block = NULL;
     }
     kfree(mvm->trans->paging_download_buf);
     mvm->trans->paging_download_buf = NULL;
+    mvm->trans->paging_db = NULL;
 
     memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 }
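The reason for clearing the pointers is visible in the error-dump hunks above, which now test fw_paging_db[0].fw_paging_block before walking the paging blocks. A standalone sketch of the same free-then-clear pattern, using a toy struct rather than the driver's types:

#include <stdlib.h>

struct blk { void *mem; };

/* clearing the pointer after freeing it makes a later cleanup pass
 * (or a dump path that tests the pointer) behave safely */
static void free_blk(struct blk *b)
{
    free(b->mem);
    b->mem = NULL;
}

int main(void)
{
    struct blk b = { malloc(16) };

    free_blk(&b);
    free_blk(&b);   /* harmless: free(NULL) is a no-op */
    return 0;
}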
@@ -479,8 +479,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
     {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
     {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
     {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
     {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
     {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+    {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
     {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
@@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
@@ -246,7 +246,15 @@ do { \
     net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
 #define net_info_ratelimited(fmt, ...) \
     net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define net_dbg_ratelimited(fmt, ...) \
+do { \
+    DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+    if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+        net_ratelimit()) \
+        __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...) \
     net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
 #else
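The new branch only changes which definition of net_dbg_ratelimited() gets compiled when dynamic debug is enabled; the rate limiting itself behaves roughly like the userspace approximation below (a plain time window rather than the kernel's net_ratelimit(), shown only to make the gating explicit):

#include <stdio.h>
#include <time.h>

/* allow at most `burst` messages per `interval` seconds */
static int dbg_ratelimit(void)
{
    static time_t window_start;
    static int printed;
    const int burst = 10, interval = 5;
    time_t now = time(NULL);

    if (now - window_start >= interval) {
        window_start = now;
        printed = 0;
    }
    return printed++ < burst;
}

#define dbg_ratelimited(fmt, ...) \
    do { if (dbg_ratelimit()) fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
    for (int i = 0; i < 100; i++)
        dbg_ratelimited("event %d\n", i);   /* only the first 10 are printed */
    return 0;
}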
@@ -4004,7 +4004,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
-    netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+    netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
 
     /* check flags correspondence */
     BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
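A standalone demonstration of the sign-extension problem this one-line change addresses: a GSO flag held in a plain int and shifted towards the top of a 64-bit feature mask smears the sign bit across all of the high feature bits unless it is widened first. The flag value and shift amount below are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int gso_type = 1 << 15;                 /* a high GSO flag bit, held in an int */
    /* shifting into the int's sign bit is what the old code did; on common
     * compilers the result then sign-extends when widened to 64 bits */
    uint64_t bad  = (uint64_t)(gso_type << 16);
    uint64_t good = (uint64_t)gso_type << 16;

    printf("bad  = %#018llx\n", (unsigned long long)bad);   /* 0xffffffff80000000 */
    printf("good = %#018llx\n", (unsigned long long)good);  /* 0x0000000080000000 */
    return 0;
}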
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 {
     switch (type) {
     case BPF_TYPE_PROG:
-        atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+        raw = bpf_prog_inc(raw);
         break;
     case BPF_TYPE_MAP:
-        bpf_map_inc(raw, true);
+        raw = bpf_map_inc(raw, true);
         break;
     default:
         WARN_ON_ONCE(1);
@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
         goto out;
 
     raw = bpf_any_get(inode->i_private, *type);
-    touch_atime(&path);
+    if (!IS_ERR(raw))
+        touch_atime(&path);
 
     path_put(&path);
     return raw;
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
     return f.file->private_data;
 }
 
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 {
-    atomic_inc(&map->refcnt);
+    if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+        atomic_dec(&map->refcnt);
+        return ERR_PTR(-EBUSY);
+    }
     if (uref)
         atomic_inc(&map->usercnt);
+    return map;
 }
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
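The cap itself is simple; what matters is that callers of bpf_map_inc() and bpf_prog_inc() now have to handle failure, as the later hunks in this series do. A userspace sketch of the same capped-refcount idea, using C11 atomics and an illustrative limit:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_REFCNT 32768   /* illustrative, mirrors BPF_MAX_REFCNT above */

/* bump the counter; if the cap is exceeded, undo the bump and fail
 * instead of letting the 32-bit counter creep toward an overflow */
static int ref_get(atomic_int *refcnt)
{
    if (atomic_fetch_add(refcnt, 1) + 1 > MAX_REFCNT) {
        atomic_fetch_sub(refcnt, 1);
        return -1;   /* caller must treat this like ERR_PTR(-EBUSY) */
    }
    return 0;
}

int main(void)
{
    atomic_int refcnt = MAX_REFCNT;          /* already at the limit */

    printf("ref_get: %d\n", ref_get(&refcnt));   /* prints -1 */
    return 0;
}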
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
     if (IS_ERR(map))
         return map;
 
-    bpf_map_inc(map, true);
+    map = bpf_map_inc(map, true);
     fdput(f);
 
     return map;
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
     return f.file->private_data;
 }
 
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+    if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+        atomic_dec(&prog->aux->refcnt);
+        return ERR_PTR(-EBUSY);
+    }
+    return prog;
+}
+
 /* called by sockets/tracing/seccomp before attaching program to an event
  * pairs with bpf_prog_put()
  */
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
     if (IS_ERR(prog))
         return prog;
 
-    atomic_inc(&prog->aux->refcnt);
+    prog = bpf_prog_inc(prog);
     fdput(f);
 
     return prog;
@@ -239,16 +239,6 @@ static const char * const reg_type_str[] = {
     [CONST_IMM]     = "imm",
 };
 
-static const struct {
-    int map_type;
-    int func_id;
-} func_limit[] = {
-    {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
-    {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
-    {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
-    {BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
-};
-
 static void print_verifier_state(struct verifier_env *env)
 {
     enum bpf_reg_type t;
@@ -921,27 +911,52 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
 
 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
 {
-    bool bool_map, bool_func;
-    int i;
-
     if (!map)
         return 0;
 
-    for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
-        bool_map = (map->map_type == func_limit[i].map_type);
-        bool_func = (func_id == func_limit[i].func_id);
-        /* only when map & func pair match it can continue.
-         * don't allow any other map type to be passed into
-         * the special func;
-         */
-        if (bool_func && bool_map != bool_func) {
-            verbose("cannot pass map_type %d into func %d\n",
-                map->map_type, func_id);
-            return -EINVAL;
-        }
+    /* We need a two way check, first is from map perspective ... */
+    switch (map->map_type) {
+    case BPF_MAP_TYPE_PROG_ARRAY:
+        if (func_id != BPF_FUNC_tail_call)
+            goto error;
+        break;
+    case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+        if (func_id != BPF_FUNC_perf_event_read &&
+            func_id != BPF_FUNC_perf_event_output)
+            goto error;
+        break;
+    case BPF_MAP_TYPE_STACK_TRACE:
+        if (func_id != BPF_FUNC_get_stackid)
+            goto error;
+        break;
+    default:
+        break;
     }
 
+    /* ... and second from the function itself. */
+    switch (func_id) {
+    case BPF_FUNC_tail_call:
+        if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+            goto error;
+        break;
+    case BPF_FUNC_perf_event_read:
+    case BPF_FUNC_perf_event_output:
+        if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+            goto error;
+        break;
+    case BPF_FUNC_get_stackid:
+        if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
+            goto error;
+        break;
+    default:
+        break;
+    }
+
     return 0;
+error:
+    verbose("cannot pass map_type %d into func %d\n",
+        map->map_type, func_id);
+    return -EINVAL;
 }
 
 static int check_call(struct verifier_env *env, int func_id)
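The old loop only rejected a pair when the helper was one of the special ones and the map type did not match, so using a special map type with an ordinary helper was never rejected. A standalone sketch of the two-way check, with made-up enum values rather than the kernel's:

#include <stdio.h>

enum map_type  { MAP_PROG_ARRAY, MAP_PERF_EVENT_ARRAY, MAP_HASH };
enum helper_id { FUNC_tail_call, FUNC_perf_event_output, FUNC_map_lookup };

static int compatible(enum map_type map, enum helper_id func)
{
    /* first direction: from the map's perspective */
    switch (map) {
    case MAP_PROG_ARRAY:
        if (func != FUNC_tail_call)
            return 0;
        break;
    case MAP_PERF_EVENT_ARRAY:
        if (func != FUNC_perf_event_output)
            return 0;
        break;
    default:
        break;
    }
    /* second direction: from the helper's perspective */
    switch (func) {
    case FUNC_tail_call:
        if (map != MAP_PROG_ARRAY)
            return 0;
        break;
    case FUNC_perf_event_output:
        if (map != MAP_PERF_EVENT_ARRAY)
            return 0;
        break;
    default:
        break;
    }
    return 1;
}

int main(void)
{
    /* the case the one-way check missed: a generic helper on a special map */
    printf("%d\n", compatible(MAP_PROG_ARRAY, FUNC_map_lookup));   /* 0: rejected */
    printf("%d\n", compatible(MAP_HASH, FUNC_map_lookup));         /* 1: allowed */
    return 0;
}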
@@ -2049,15 +2064,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                 return -E2BIG;
             }
 
-            /* remember this map */
-            env->used_maps[env->used_map_cnt++] = map;
-
             /* hold the map. If the program is rejected by verifier,
              * the map will be released by release_maps() or it
              * will be used by the valid program until it's unloaded
              * and all maps are released in free_bpf_prog_info()
              */
-            bpf_map_inc(map, false);
+            map = bpf_map_inc(map, false);
+            if (IS_ERR(map)) {
+                fdput(f);
+                return PTR_ERR(map);
+            }
+            env->used_maps[env->used_map_cnt++] = map;
+
             fdput(f);
 next_insn:
             insn++;
@@ -572,8 +572,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
     struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
     struct batadv_hard_iface *primary_if = NULL;
 
-    if (hard_iface->if_status == BATADV_IF_ACTIVE)
-        batadv_hardif_deactivate_interface(hard_iface);
+    batadv_hardif_deactivate_interface(hard_iface);
 
     if (hard_iface->if_status != BATADV_IF_INACTIVE)
         goto out;
@@ -663,6 +663,7 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
     ether_addr_copy(neigh_node->addr, neigh_addr);
     neigh_node->if_incoming = hard_iface;
     neigh_node->orig_node = orig_node;
+    neigh_node->last_seen = jiffies;
 
     /* extra reference for return */
     kref_init(&neigh_node->refcount);
@@ -105,6 +105,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
         neigh_node = NULL;
 
     spin_lock_bh(&orig_node->neigh_list_lock);
+    /* curr_router used earlier may not be the current orig_ifinfo->router
+     * anymore because it was dereferenced outside of the neigh_list_lock
+     * protected region. After the new best neighbor has replace the current
+     * best neighbor the reference counter needs to decrease. Consequently,
+     * the code needs to ensure the curr_router variable contains a pointer
+     * to the replaced best neighbor.
+     */
+    curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
+
     rcu_assign_pointer(orig_ifinfo->router, neigh_node);
     spin_unlock_bh(&orig_node->neigh_list_lock);
     batadv_orig_ifinfo_put(orig_ifinfo);
@@ -675,6 +675,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
         if (pending) {
             hlist_del(&forw_packet->list);
+            if (!forw_packet->own)
+                atomic_inc(&bat_priv->bcast_queue_left);
+
             batadv_forw_packet_free(forw_packet);
         }
     }
@@ -702,6 +705,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
         if (pending) {
             hlist_del(&forw_packet->list);
+            if (!forw_packet->own)
+                atomic_inc(&bat_priv->batman_queue_left);
+
             batadv_forw_packet_free(forw_packet);
         }
     }
@@ -408,11 +408,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
      */
     nf_reset(skb);
 
+    if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+        goto dropped;
+
     vid = batadv_get_vid(skb, 0);
     ethhdr = eth_hdr(skb);
 
     switch (ntohs(ethhdr->h_proto)) {
     case ETH_P_8021Q:
+        if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
+            goto dropped;
+
         vhdr = (struct vlan_ethhdr *)skb->data;
 
         if (vhdr->h_vlan_encapsulated_proto != ethertype)
@@ -424,8 +430,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
     }
 
     /* skb->dev & skb->pkt_type are set here */
-    if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
-        goto dropped;
     skb->protocol = eth_type_trans(skb, soft_iface);
 
     /* should not be necessary anymore as we use skb_pull_rcsum()
@@ -470,6 +470,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                    const struct sock *sk2,
                    bool match_wildcard))
 {
+    struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
     struct sock *sk2;
     struct hlist_nulls_node *node;
     kuid_t uid = sock_i_uid(sk);
@@ -479,6 +480,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
             sk2->sk_family == sk->sk_family &&
             ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
             sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
+            inet_csk(sk2)->icsk_bind_hash == tb &&
             sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
             saddr_same(sk, sk2, false))
             return reuseport_add_sock(sk, sk2);
@@ -179,6 +179,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
     return flags;
 }
 
+/* Fills in tpi and returns header length to be pulled. */
 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                 bool *csum_err)
 {
@@ -238,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                 return -EINVAL;
         }
     }
-    return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
+    return hdr_len;
 }
 
 static void ipgre_err(struct sk_buff *skb, u32 info,
@@ -341,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
     struct tnl_ptk_info tpi;
     bool csum_err = false;
 
-    if (parse_gre_header(skb, &tpi, &csum_err)) {
+    if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
         if (!csum_err)      /* ignore csum errors. */
             return;
     }
@@ -419,6 +420,7 @@ static int gre_rcv(struct sk_buff *skb)
 {
     struct tnl_ptk_info tpi;
     bool csum_err = false;
+    int hdr_len;
 
 #ifdef CONFIG_NET_IPGRE_BROADCAST
     if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
@@ -428,7 +430,10 @@ static int gre_rcv(struct sk_buff *skb)
     }
 #endif
 
-    if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+    hdr_len = parse_gre_header(skb, &tpi, &csum_err);
+    if (hdr_len < 0)
+        goto drop;
+    if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false) < 0)
         goto drop;
 
     if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
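The point of returning the header length instead of pulling inside parse_gre_header() is that the ICMP error path (gre_err() above) must not consume the packet data it is only inspecting. A rough standalone sketch of the parse-then-pull split, using a toy buffer type rather than the kernel skb API:

#include <stdio.h>
#include <string.h>

struct buf { const char *data; size_t len; };

/* returns the header length to "pull", or -1 on error; does not modify buf */
static int parse_hdr(const struct buf *b)
{
    if (b->len < 4 || memcmp(b->data, "GRE0", 4) != 0)
        return -1;
    return 4;
}

/* consume n bytes, playing the role of iptunnel_pull_header() */
static int pull(struct buf *b, int n)
{
    if (n < 0 || (size_t)n > b->len)
        return -1;
    b->data += n;
    b->len -= n;
    return 0;
}

int main(void)
{
    struct buf b = { "GRE0payload", 11 };
    int hdr_len = parse_hdr(&b);    /* an error path could stop here, data untouched */

    if (hdr_len < 0 || pull(&b, hdr_len) < 0)
        return 1;
    printf("%.*s\n", (int)b.len, b.data);   /* prints "payload" */
    return 0;
}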
@@ -523,7 +528,8 @@ static struct rtable *gre_get_rt(struct sk_buff *skb,
     return ip_route_output_key(net, fl);
 }
 
-static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
+            __be16 proto)
 {
     struct ip_tunnel_info *tun_info;
     const struct ip_tunnel_key *key;
@@ -575,7 +581,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
     }
 
     flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
-    build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+    build_header(skb, tunnel_hlen, flags, proto,
              tunnel_id_to_key(tun_info->key.tun_id), 0);
 
     df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
@@ -616,7 +622,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
     const struct iphdr *tnl_params;
 
     if (tunnel->collect_md) {
-        gre_fb_xmit(skb, dev);
+        gre_fb_xmit(skb, dev, skb->protocol);
         return NETDEV_TX_OK;
     }
 
@@ -660,7 +666,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
     struct ip_tunnel *tunnel = netdev_priv(dev);
 
     if (tunnel->collect_md) {
-        gre_fb_xmit(skb, dev);
+        gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
         return NETDEV_TX_OK;
     }
 
@@ -893,7 +899,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
     netif_keep_dst(dev);
     dev->addr_len = 4;
 
-    if (iph->daddr) {
+    if (iph->daddr && !tunnel->collect_md) {
 #ifdef CONFIG_NET_IPGRE_BROADCAST
         if (ipv4_is_multicast(iph->daddr)) {
             if (!iph->saddr)
@@ -902,8 +908,9 @@ static int ipgre_tunnel_init(struct net_device *dev)
             dev->header_ops = &ipgre_header_ops;
         }
 #endif
-    } else
+    } else if (!tunnel->collect_md) {
         dev->header_ops = &ipgre_header_ops;
+    }
 
     return ip_tunnel_init(dev);
 }
@@ -946,6 +953,11 @@ static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
     if (flags & (GRE_VERSION|GRE_ROUTING))
         return -EINVAL;
 
+    if (data[IFLA_GRE_COLLECT_METADATA] &&
+        data[IFLA_GRE_ENCAP_TYPE] &&
+        nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
+        return -EINVAL;
+
     return 0;
 }
 
@@ -326,12 +326,12 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
         if (!IS_ERR(rt)) {
             tdev = rt->dst.dev;
-            dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
-                      fl4.saddr);
             ip_rt_put(rt);
         }
         if (dev->type != ARPHRD_ETHER)
             dev->flags |= IFF_POINTOPOINT;
+
+        dst_cache_reset(&tunnel->dst_cache);
     }
 
     if (!tdev && tunnel->parms.link)
@@ -1376,9 +1376,9 @@ static int l2tp_tunnel_sock_create(struct net *net,
             memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
                    sizeof(udp_conf.peer_ip6));
             udp_conf.use_udp6_tx_checksums =
-                cfg->udp6_zero_tx_checksums;
+                ! cfg->udp6_zero_tx_checksums;
             udp_conf.use_udp6_rx_checksums =
-                cfg->udp6_zero_rx_checksums;
+                ! cfg->udp6_zero_rx_checksums;
         } else
 #endif
         {
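The bug was a simple polarity inversion: the tunnel configuration flags say "send/receive zero checksums", while the UDP socket options mean "use checksums", so one must be the negation of the other. A trivial illustration, with variable names that mimic the ones above but outside the kernel:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    bool udp6_zero_tx_checksums = true;               /* user asked to disable checksums */
    bool use_udp6_tx_checksums  = !udp6_zero_tx_checksums;

    printf("use tx checksums: %s\n", use_udp6_tx_checksums ? "yes" : "no");
    return 0;
}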
@@ -1761,7 +1761,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
         ret = dev_alloc_name(ndev, ndev->name);
         if (ret < 0) {
-            free_netdev(ndev);
+            ieee80211_if_free(ndev);
             return ret;
         }
 
@@ -1847,7 +1847,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
         ret = register_netdevice(ndev);
         if (ret) {
-            free_netdev(ndev);
+            ieee80211_if_free(ndev);
             return ret;
         }
     }
@@ -1444,6 +1444,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
     int bearer_id = b->identity;
     struct tipc_link_entry *le;
     u16 bc_ack = msg_bcast_ack(hdr);
+    u32 self = tipc_own_addr(net);
     int rc = 0;
 
     __skb_queue_head_init(&xmitq);
@@ -1460,6 +1461,10 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
         return tipc_node_bc_rcv(net, skb, bearer_id);
     }
 
+    /* Discard unicast link messages destined for another node */
+    if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
+        goto discard;
+
     /* Locate neighboring node that sent packet */
     n = tipc_node_find(net, msg_prevnode(hdr));
     if (unlikely(!n))
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
         u64 cookie;
     } data;
 
-    memset(&data, 0, sizeof(data));
     data.pid = bpf_get_current_pid_tgid();
     data.cookie = 0x12345678;
 