Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Use an appropriate TSQ pacing shift in mac80211, from Toke
    Høiland-Jørgensen. (A short sketch of the pacing-shift hint
    follows this list.)

 2) Just like ipv4's ip_route_me_harder(), we have to use skb_to_full_sk
    in ip6_route_me_harder, from Eric Dumazet. (A sketch of the
    skb_to_full_sk() pattern also follows the list.)

 3) Fix several shutdown races and similar other problems in l2tp, from
    James Chapman.

 4) Handle missing XDP flush properly in tuntap, for real this time.
    From Jason Wang.

 5) Out-of-bounds access in powerpc ebpf tailcalls, from Daniel
    Borkmann.

 6) Fix phy_resume() locking, from Andrew Lunn.

 7) IFLA_MTU values are ignored on newlink for some tunnel types, fix
    from Xin Long.

 8) Revert F-RTO middle box workarounds, they only handle one dimension
    of the problem. From Yuchung Cheng.

 9) Fix socket refcounting in RDS, from Ka-Cheong Poon.

10) Don't allow ppp unit registration to an unregistered channel, from
    Guillaume Nault.

11) Various hv_netvsc fixes from Stephen Hemminger.
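
For context on (1): TCP Small Queues caps per-socket in-flight data at
roughly sk_pacing_rate >> sk_pacing_shift, and the default shift of 10
(about 1ms worth of traffic) is too small for 802.11 aggregation to form.
A minimal sketch of the hint pattern, assuming the sk_pacing_shift_update()
helper from this cycle; the calling function is hypothetical, not a real
mac80211 symbol:

#include <net/sock.h>

/* Sketch: ask TSQ for a larger in-flight budget by lowering
 * sk_pacing_shift. mac80211 uses 8 (~4ms of queued data) so that
 * enough frames accumulate to build A-MPDU aggregates.
 * example_wake_tx() is illustrative only.
 */
static void example_wake_tx(struct sock *sk)
{
	if (sk)
		sk_pacing_shift_update(sk, 8);
}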
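Likewise for (2): request/timewait minisocks do not carry the full set of
socket fields, so output re-routing must first resolve the full socket. A
hedged sketch of the skb_to_full_sk() pattern; the surrounding function is
illustrative, not the actual ip6_route_me_harder() code:

#include <linux/skbuff.h>
#include <net/inet_sock.h>

/* Sketch: skb->sk may be a request_sock whose struct lacks fields
 * such as sk_bound_dev_if; skb_to_full_sk() maps it back to the
 * full (listener) socket before those fields are read.
 */
static int example_bound_oif(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	return sk ? sk->sk_bound_dev_if : 0;
}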

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (98 commits)
  hv_netvsc: propagate rx filters to VF
  hv_netvsc: filter multicast/broadcast
  hv_netvsc: defer queue selection to VF
  hv_netvsc: use napi_schedule_irqoff
  hv_netvsc: fix race in napi poll when rescheduling
  hv_netvsc: cancel subchannel setup before halting device
  hv_netvsc: fix error unwind handling if vmbus_open fails
  hv_netvsc: only wake transmit queue if link is up
  hv_netvsc: avoid retry on send during shutdown
  virtio-net: re enable XDP_REDIRECT for mergeable buffer
  ppp: prevent unregistered channels from connecting to PPP units
  tc-testing: skbmod: fix match value of ethertype
  mlxsw: spectrum_switchdev: Check success of FDB add operation
  net: make skb_gso_*_seglen functions private
  net: xfrm: use skb_gso_validate_network_len() to check gso sizes
  net: sched: tbf: handle GSO_BY_FRAGS case in enqueue
  net: rename skb_gso_validate_mtu -> skb_gso_validate_network_len
  rds: Incorrect reference counting in TCP socket creation
  net: ethtool: don't ignore return from driver get_fecparam method
  vrf: check forwarding on the original netdevice when generating ICMP dest unreachable
  ...
Linus Torvalds 2018-03-05 11:29:24 -08:00
commit 547046141f
95 changed files with 1033 additions and 658 deletions

View File

@@ -18,6 +18,7 @@ Required properties:
       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
       - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
       - "renesas,etheravb-r8a77970" for the R8A77970 SoC.
+      - "renesas,etheravb-r8a77980" for the R8A77980 SoC.
       - "renesas,etheravb-r8a77995" for the R8A77995 SoC.
       - "renesas,etheravb-rcar-gen3" as a fallback for the above
 		R-Car Gen3 devices.

View File

@@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO
 config MACH_DNS323
 	bool "D-Link DNS-323"
-	select GENERIC_NET_UTILS
 	select I2C_BOARDINFO if I2C
 	help
 	  Say 'Y' here if you want your kernel to support the
@@ -66,7 +65,6 @@ config MACH_DNS323
 config MACH_TS209
 	bool "QNAP TS-109/TS-209"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-109/TS-209 platform.
@@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL
 config MACH_TS409
 	bool "QNAP TS-409"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-409 platform.

View File

@@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = {
 	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
 };
 
+/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
+ * functions be kept somewhere?
+ */
+static int __init dns323_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init dns323_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = dns323_parse_hex_nibble(b[0]);
+	lo = dns323_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init dns323_read_mac_addr(void)
 {
 	u_int8_t addr[6];
-	void __iomem *mac_page;
+	int i;
+	char *mac_page;
 
 	/* MAC address is stored as a regular ol' string in /dev/mtdblock4
 	 * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void)
 	if (!mac_page)
 		return -ENOMEM;
 
-	if (!mac_pton((__force const char *) mac_page, addr))
-		goto error_fail;
+	/* Sanity check the string we're looking at */
+	for (i = 0; i < 5; i++) {
+		if (*(mac_page + (i * 3) + 2) != ':') {
+			goto error_fail;
+		}
+	}
+
+	for (i = 0; i < 6; i++)	{
+		int byte;
+
+		byte = dns323_parse_hex_byte(mac_page + (i * 3));
+		if (byte < 0) {
+			goto error_fail;
+		}
+
+		addr[i] = byte;
+	}
 
 	iounmap(mac_page);
 	printk("DNS-323: Found ethernet MAC address: %pM\n", addr);

View File

@@ -53,13 +53,54 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
 	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
+static int __init qnap_tsx09_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init qnap_tsx09_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = qnap_tsx09_parse_hex_nibble(b[0]);
+	lo = qnap_tsx09_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
 {
 	u_int8_t addr[6];
+	int i;
 
-	if (!mac_pton(addr_str, addr))
-		return -1;
+	for (i = 0; i < 6; i++) {
+		int byte;
+
+		/*
+		 * Enforce "xx:xx:xx:xx:xx:xx\n" format.
+		 */
+		if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
+			return -1;
+
+		byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
+		if (byte < 0)
+			return -1;
+		addr[i] = byte;
+	}
 
 	printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
 
 	memcpy(qnap_tsx09_eth_data.mac_addr, addr, 6);
@@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
 	unsigned long addr;
 
 	for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
-		void __iomem *nor_page;
+		char *nor_page;
 		int ret = 0;
 
 		nor_page = ioremap(addr, 1024);
 		if (nor_page != NULL) {
-			ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
+			ret = qnap_tsx09_check_mac_addr(nor_page);
 			iounmap(nor_page);
 		}
 

View File

@@ -240,6 +240,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
 	 *   goto out;
 	 */
 	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
 	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
 	PPC_BCC(COND_GE, out);

View File

@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
@@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = {
 	{ }	/* Terminating entry */
 };
 
+/* The Bluetooth USB module build into some devices needs to be reset on resume,
+ * this is a problem with the platform (likely shutting off all power) not with
+ * the module itself. So we use a DMI list to match known broken platforms.
+ */
+static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+	{
+		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+		},
+	},
+	{}
+};
+
 #define BTUSB_MAX_ISOC_FRAMES	10
 
 #define BTUSB_INTR_RUNNING	0
@@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf,
 	hdev->send   = btusb_send_frame;
 	hdev->notify = btusb_notify;
 
+	if (dmi_check_system(btusb_needs_reset_resume_table))
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+
 #ifdef CONFIG_PM
 	err = btusb_config_oob_wake(hdev);
 	if (err)
@@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_QCA_ROME) {
 		data->setup_on_usb = btusb_setup_qca;
 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
-
-		/* QCA Rome devices lose their updated firmware over suspend,
-		 * but the USB hub doesn't notice any status change.
-		 * explicitly request a device reset on resume.
-		 */
-		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}
 
 #ifdef CONFIG_BT_HCIBTUSB_RTL

View File

@@ -922,12 +922,13 @@ static int bcm_get_resources(struct bcm_device *dev)
 
 	dev->clk = devm_clk_get(dev->dev, NULL);
 
-	dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
-					    GPIOD_OUT_LOW);
+	dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+						     GPIOD_OUT_LOW);
 	if (IS_ERR(dev->device_wakeup))
 		return PTR_ERR(dev->device_wakeup);
 
-	dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
+	dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
+						GPIOD_OUT_LOW);
 	if (IS_ERR(dev->shutdown))
 		return PTR_ERR(dev->shutdown);

View File

@@ -3063,9 +3063,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 	if (ndev->features & NETIF_F_RXCSUM)
 		gfar_rx_checksum(skb, fcb);
 
-	/* Tell the skb what kind of packet this is */
-	skb->protocol = eth_type_trans(skb, ndev);
-
 	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 	 * Even if vlan rx accel is disabled, on some chips
 	 * RXFCB_VLN is pseudo randomly set.
@@ -3136,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			continue;
 		}
 
+		gfar_process_frame(ndev, skb);
+
 		/* Increment the number of packets */
 		total_pkts++;
 		total_bytes += skb->len;
 
 		skb_record_rx_queue(skb, rx_queue->qindex);
 
-		gfar_process_frame(ndev, skb);
+		skb->protocol = eth_type_trans(skb, ndev);
 
 		/* Send the packet up the stack */
 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);

View File

@@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 				     ixgbe_rx_pg_size(rx_ring),
 				     DMA_FROM_DEVICE,
 				     IXGBE_RX_DMA_ATTR);
+	} else if (ring_uses_build_skb(rx_ring)) {
+		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      IXGBE_CB(skb)->dma,
+					      offset,
+					      skb_headlen(skb),
+					      DMA_FROM_DEVICE);
 	} else {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

View File

@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
 	MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
 	MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
 	MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
-	MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8),
-	MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2),
-	MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6),
-	MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
-	MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
-	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
-	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
-	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
-	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
 	MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
 	MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+	MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+	MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+	MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+	MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
+	MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
+	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
+	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
+	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
+	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
 };
 
-#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38
+#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
 
 struct mlxsw_afk_element_inst { /* element instance in actual block */
 	const struct mlxsw_afk_element_info *info;

View File

@@ -1459,6 +1459,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 	}
 
 	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+	mlxsw_sp_port_vlan->ref_count = 1;
 	mlxsw_sp_port_vlan->vid = vid;
 	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
 
@@ -1486,8 +1487,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
-	if (mlxsw_sp_port_vlan)
+	if (mlxsw_sp_port_vlan) {
+		mlxsw_sp_port_vlan->ref_count++;
 		return mlxsw_sp_port_vlan;
+	}
 
 	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
 }
@@ -1496,6 +1499,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
 {
 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
 
+	if (--mlxsw_sp_port_vlan->ref_count != 0)
+		return;
+
 	if (mlxsw_sp_port_vlan->bridge_port)
 		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
 	else if (fid)
@@ -4207,13 +4213,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
 	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
 };
 
-static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
-static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
-
 static void
-mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
+				      struct devlink_resource_size_params *kvd_size_params,
+				      struct devlink_resource_size_params *linear_size_params,
+				      struct devlink_resource_size_params *hash_double_size_params,
+				      struct devlink_resource_size_params *hash_single_size_params)
 {
 	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
 						 KVD_SINGLE_MIN_SIZE);
@@ -4222,37 +4227,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
 	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
 	u32 linear_size_min = 0;
 
-	/* KVD top resource */
-	mlxsw_sp_kvd_size_params.size_min = kvd_size;
-	mlxsw_sp_kvd_size_params.size_max = kvd_size;
-	mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-	/* Linear part init */
-	mlxsw_sp_linear_size_params.size_min = linear_size_min;
-	mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
-					       double_size_min;
-	mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-	/* Hash double part init */
-	mlxsw_sp_hash_double_size_params.size_min = double_size_min;
-	mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
-						    linear_size_min;
-	mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-	/* Hash single part init */
-	mlxsw_sp_hash_single_size_params.size_min = single_size_min;
-	mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
-						    linear_size_min;
-	mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
+	devlink_resource_size_params_init(linear_size_params, linear_size_min,
+					  kvd_size - single_size_min -
+					  double_size_min,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
+	devlink_resource_size_params_init(hash_double_size_params,
+					  double_size_min,
+					  kvd_size - single_size_min -
+					  linear_size_min,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
+	devlink_resource_size_params_init(hash_single_size_params,
+					  single_size_min,
+					  kvd_size - double_size_min -
+					  linear_size_min,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
 }
 
 static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
+	struct devlink_resource_size_params hash_single_size_params;
+	struct devlink_resource_size_params hash_double_size_params;
+	struct devlink_resource_size_params linear_size_params;
+	struct devlink_resource_size_params kvd_size_params;
 	u32 kvd_size, single_size, double_size, linear_size;
 	const struct mlxsw_config_profile *profile;
 	int err;
@@ -4261,13 +4264,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
 		return -EIO;
 
-	mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
+					      &linear_size_params,
+					      &hash_double_size_params,
+					      &hash_single_size_params);
 	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
 					true, kvd_size,
 					MLXSW_SP_RESOURCE_KVD,
 					DEVLINK_RESOURCE_ID_PARENT_TOP,
-					&mlxsw_sp_kvd_size_params,
+					&kvd_size_params,
 					&mlxsw_sp_resource_kvd_ops);
 	if (err)
 		return err;
@@ -4277,7 +4284,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 					false, linear_size,
 					MLXSW_SP_RESOURCE_KVD_LINEAR,
 					MLXSW_SP_RESOURCE_KVD,
-					&mlxsw_sp_linear_size_params,
+					&linear_size_params,
 					&mlxsw_sp_resource_kvd_linear_ops);
 	if (err)
 		return err;
@@ -4291,7 +4298,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 					false, double_size,
 					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
 					MLXSW_SP_RESOURCE_KVD,
-					&mlxsw_sp_hash_double_size_params,
+					&hash_double_size_params,
 					&mlxsw_sp_resource_kvd_hash_double_ops);
 	if (err)
 		return err;
@@ -4301,7 +4308,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 					false, single_size,
 					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
 					MLXSW_SP_RESOURCE_KVD,
-					&mlxsw_sp_hash_single_size_params,
+					&hash_single_size_params,
 					&mlxsw_sp_resource_kvd_hash_single_ops);
 	if (err)
 		return err;

View File

@@ -211,6 +211,7 @@ struct mlxsw_sp_port_vlan {
 	struct list_head list;
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	struct mlxsw_sp_fid *fid;
+	unsigned int ref_count;
 	u16 vid;
 	struct mlxsw_sp_bridge_port *bridge_port;
 	struct list_head bridge_vlan_node;

View File

@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
 	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
 	[MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
 	[MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
 };
 
 static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
 	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
-	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
 };
 
 static const int *mlxsw_sp_packet_type_sfgc_types[] = {

View File

@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				     bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 			      mac, fid, action, local_port);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
 
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
+
+out:
+	kfree(sfd_pl);
 	return err;
 }
 
@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 				       bool adding, bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
 				  lag_vid, lag_id);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
 
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
+
+out:
+	kfree(sfd_pl);
 	return err;
 }
 
@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 				u16 fid, u16 mid_idx, bool adding)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
+
+out:
 	kfree(sfd_pl);
 	return err;
 }

View File

@@ -439,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
 		     enum_index);
 }
 
+static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+			     int enum_index)
+{
+	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
+{
+	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;

View File

@@ -567,15 +567,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
 	return mdp->tsu_addr + mdp->reg_offset[enum_index];
 }
 
-static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
-				    int enum_index)
-{
-	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
-static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
-{
-	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
 #endif	/* #ifndef __SH_ETH_H__ */

View File

@@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev,
 	if (unlikely(!net_device || net_device->destroy))
 		return -ENODEV;
 
-	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
-	 * here before the negotiation with the host is finished and
-	 * send_section_map may not be allocated yet.
-	 */
-	if (unlikely(!net_device->send_section_map))
-		return -EAGAIN;
-
 	nvchan = &net_device->chan_table[packet->q_idx];
 	packet->send_buf_index = NETVSC_INVALID_INDEX;
 	packet->cp_partial = false;
@@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev,
 	/* Send control message directly without accessing msd (Multi-Send
 	 * Data) field which may be changed during data packet processing.
 	 */
-	if (!skb) {
-		cur_send = packet;
-		goto send_now;
-	}
+	if (!skb)
+		return netvsc_send_pkt(device, packet, net_device, pb, skb);
 
 	/* batch packets in send buffer if possible */
 	msdp = &nvchan->msd;
@@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev,
 		}
 	}
 
-send_now:
 	if (cur_send)
 		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
 
@@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
 	    work_done < budget &&
 	    napi_complete_done(napi, work_done) &&
-	    hv_end_read(&channel->inbound)) {
+	    hv_end_read(&channel->inbound) &&
+	    napi_schedule_prep(napi)) {
 		hv_begin_read(&channel->inbound);
-		napi_reschedule(napi);
+		__napi_schedule(napi);
 	}
 
 	/* Driver may overshoot since multiple packets per descriptor */
@@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context)
 		/* disable interupts from host */
 		hv_begin_read(rbi);
 
-		__napi_schedule(&nvchan->napi);
+		__napi_schedule_irqoff(&nvchan->napi);
 	}
 }
 
@@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 			 netvsc_channel_cb, net_device->chan_table);
 
 	if (ret != 0) {
-		netif_napi_del(&net_device->chan_table[0].napi);
 		netdev_err(ndev, "unable to open channel: %d\n", ret);
 		goto cleanup;
 	}
@@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 
 	napi_enable(&net_device->chan_table[0].napi);
 
-	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
-	 * populated.
-	 */
-	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
-
 	/* Connect with the NetVsp */
 	ret = netvsc_connect_vsp(device, net_device, device_info);
 	if (ret != 0) {
@@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 		goto close;
 	}
 
+	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
+	 * populated.
+	 */
+	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
+
 	return net_device;
 
 close:
@@ -1329,6 +1319,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 	vmbus_close(device->channel);
 
 cleanup:
+	netif_napi_del(&net_device->chan_table[0].napi);
 	free_netvsc_device(&net_device->rcu);
 
 	return ERR_PTR(ret);

View File

@@ -66,10 +66,36 @@ static int debug = -1;
 module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static void netvsc_set_multicast_list(struct net_device *net)
+static void netvsc_change_rx_flags(struct net_device *net, int change)
 {
-	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+	int inc;
+
+	if (!vf_netdev)
+		return;
+
+	if (change & IFF_PROMISC) {
+		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
+		dev_set_promiscuity(vf_netdev, inc);
+	}
+
+	if (change & IFF_ALLMULTI) {
+		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
+		dev_set_allmulti(vf_netdev, inc);
+	}
+}
+
+static void netvsc_set_rx_mode(struct net_device *net)
+{
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
+
+	if (vf_netdev) {
+		dev_uc_sync(vf_netdev, net);
+		dev_mc_sync(vf_netdev, net);
+	}
 
 	rndis_filter_update(nvdev);
 }
@@ -91,12 +117,11 @@ static int netvsc_open(struct net_device *net)
 			return ret;
 		}
 
-	netif_tx_wake_all_queues(net);
-
 	rdev = nvdev->extension;
-
-	if (!rdev->link_state)
+	if (!rdev->link_state) {
 		netif_carrier_on(net);
+		netif_tx_wake_all_queues(net);
+	}
 
 	if (vf_netdev) {
 		/* Setting synthetic device up transparently sets
@@ -299,8 +324,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	rcu_read_lock();
 	vf_netdev = rcu_dereference(ndc->vf_netdev);
 	if (vf_netdev) {
-		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
-		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
+
+		if (vf_ops->ndo_select_queue)
+			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
+						       accel_priv, fallback);
+		else
+			txq = fallback(vf_netdev, skb);
+
+		/* Record the queue selected by VF so that it can be
+		 * used for common case where VF has more queues than
+		 * the synthetic device.
+		 */
+		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
 	} else {
 		txq = netvsc_pick_tx(ndev, skb);
 	}
@@ -1576,7 +1612,8 @@ static const struct net_device_ops device_ops = {
 	.ndo_open =			netvsc_open,
 	.ndo_stop =			netvsc_close,
 	.ndo_start_xmit =		netvsc_start_xmit,
-	.ndo_set_rx_mode =		netvsc_set_multicast_list,
+	.ndo_change_rx_flags =		netvsc_change_rx_flags,
+	.ndo_set_rx_mode =		netvsc_set_rx_mode,
 	.ndo_change_mtu =		netvsc_change_mtu,
 	.ndo_validate_addr =		eth_validate_addr,
 	.ndo_set_mac_address =		netvsc_set_mac_addr,
@@ -1807,6 +1844,11 @@ static void __netvsc_vf_setup(struct net_device *ndev,
 		netdev_warn(vf_netdev,
 			    "unable to change mtu to %u\n", ndev->mtu);
 
+	/* set multicast etc flags on VF */
+	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
+	dev_uc_sync(vf_netdev, ndev);
+	dev_mc_sync(vf_netdev, ndev);
+
 	if (netif_running(ndev)) {
 		ret = dev_open(vf_netdev);
 		if (ret)

View File

@@ -854,15 +854,19 @@ static void rndis_set_multicast(struct work_struct *w)
 {
 	struct rndis_device *rdev
 		= container_of(w, struct rndis_device, mcast_work);
+	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
+	unsigned int flags = rdev->ndev->flags;
 
-	if (rdev->ndev->flags & IFF_PROMISC)
-		rndis_filter_set_packet_filter(rdev,
-					       NDIS_PACKET_TYPE_PROMISCUOUS);
-	else
-		rndis_filter_set_packet_filter(rdev,
-					       NDIS_PACKET_TYPE_BROADCAST |
-					       NDIS_PACKET_TYPE_ALL_MULTICAST |
-					       NDIS_PACKET_TYPE_DIRECTED);
+	if (flags & IFF_PROMISC) {
+		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
+	} else {
+		if (flags & IFF_ALLMULTI)
+			flags |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+		if (flags & IFF_BROADCAST)
+			flags |= NDIS_PACKET_TYPE_BROADCAST;
+	}
+
+	rndis_filter_set_packet_filter(rdev, filter);
 }
 
 void rndis_filter_update(struct netvsc_device *nvdev)
@@ -1340,6 +1344,9 @@ void rndis_filter_device_remove(struct hv_device *dev,
 {
 	struct rndis_device *rndis_dev = net_dev->extension;
 
+	/* Don't try and setup sub channels if about to halt */
+	cancel_work_sync(&net_dev->subchan_work);
+
 	/* Halt and release the rndis device */
 	rndis_filter_halt_device(rndis_dev);

View File

@@ -819,7 +819,7 @@ void phy_start(struct phy_device *phydev)
 		break;
 	case PHY_HALTED:
 		/* if phy was suspended, bring the physical link up again */
-		phy_resume(phydev);
+		__phy_resume(phydev);
 
 		/* make sure interrupts are re-enabled for the PHY */
 		if (phy_interrupt_is_valid(phydev)) {

View File

@@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev)
 	if (!mdio_bus_phy_may_suspend(phydev))
 		goto no_resume;
 
-	mutex_lock(&phydev->lock);
 	ret = phy_resume(phydev);
-	mutex_unlock(&phydev->lock);
 	if (ret < 0)
 		return ret;
 
@@ -1041,9 +1039,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	if (err)
 		goto error;
 
-	mutex_lock(&phydev->lock);
 	phy_resume(phydev);
-	mutex_unlock(&phydev->lock);
 	phy_led_triggers_register(phydev);
 
 	return err;
@@ -1172,7 +1168,7 @@ int phy_suspend(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_suspend);
 
-int phy_resume(struct phy_device *phydev)
+int __phy_resume(struct phy_device *phydev)
 {
 	struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
 	int ret = 0;
@@ -1189,6 +1185,18 @@ int phy_resume(struct phy_device *phydev)
 
 	return ret;
 }
+EXPORT_SYMBOL(__phy_resume);
+
+int phy_resume(struct phy_device *phydev)
+{
+	int ret;
+
+	mutex_lock(&phydev->lock);
+	ret = __phy_resume(phydev);
+	mutex_unlock(&phydev->lock);
+
+	return ret;
+}
 EXPORT_SYMBOL(phy_resume);
 
 int phy_loopback(struct phy_device *phydev, bool enable)
int phy_loopback(struct phy_device *phydev, bool enable) int phy_loopback(struct phy_device *phydev, bool enable)

View File

@@ -3161,6 +3161,15 @@ ppp_connect_channel(struct channel *pch, int unit)
 		goto outl;
 
 	ppp_lock(ppp);
+	spin_lock_bh(&pch->downl);
+	if (!pch->chan) {
+		/* Don't connect unregistered channels */
+		spin_unlock_bh(&pch->downl);
+		ppp_unlock(ppp);
+		ret = -ENOTCONN;
+		goto outl;
+	}
+	spin_unlock_bh(&pch->downl);
 	if (pch->file.hdrlen > ppp->file.hdrlen)
 		ppp->file.hdrlen = pch->file.hdrlen;
 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */

View File

@@ -181,7 +181,6 @@ struct tun_file {
 	struct tun_struct *detached;
 	struct ptr_ring tx_ring;
 	struct xdp_rxq_info xdp_rxq;
-	int xdp_pending_pkts;
 };
 
 struct tun_flow_entry {
@@ -1643,6 +1642,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	else
 		*skb_xdp = 0;
 
+	preempt_disable();
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(tun->xdp_prog);
 	if (xdp_prog && !*skb_xdp) {
@@ -1662,11 +1662,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		case XDP_REDIRECT:
 			get_page(alloc_frag->page);
 			alloc_frag->offset += buflen;
-			++tfile->xdp_pending_pkts;
 			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
+			xdp_do_flush_map();
 			if (err)
 				goto err_redirect;
 			rcu_read_unlock();
+			preempt_enable();
 			return NULL;
 		case XDP_TX:
 			xdp_xmit = true;
@@ -1688,6 +1689,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	skb = build_skb(buf, buflen);
 	if (!skb) {
 		rcu_read_unlock();
+		preempt_enable();
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1700,10 +1702,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		skb->dev = tun->dev;
 		generic_xdp_tx(skb, xdp_prog);
 		rcu_read_unlock();
+		preempt_enable();
 		return NULL;
 	}
 
 	rcu_read_unlock();
+	preempt_enable();
 
 	return skb;
 
@@ -1711,6 +1715,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	put_page(alloc_frag->page);
 err_xdp:
 	rcu_read_unlock();
+	preempt_enable();
 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
 	return NULL;
 }
@@ -1984,11 +1989,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	result = tun_get_user(tun, tfile, NULL, from,
 			      file->f_flags & O_NONBLOCK, false);
 
-	if (tfile->xdp_pending_pkts) {
-		tfile->xdp_pending_pkts = 0;
-		xdp_do_flush_map();
-	}
-
 	tun_put(tun);
 	return result;
 }
@@ -2325,13 +2325,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
 			   m->msg_flags & MSG_DONTWAIT,
 			   m->msg_flags & MSG_MORE);
-
-	if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
-	    !(m->msg_flags & MSG_MORE)) {
-		tfile->xdp_pending_pkts = 0;
-		xdp_do_flush_map();
-	}
-
 	tun_put(tun);
 	return ret;
 }
@@ -3163,7 +3156,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 
 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-	tfile->xdp_pending_pkts = 0;
 
 	return 0;
 }

View File

@@ -895,6 +895,12 @@ static const struct usb_device_id products[] = {
 				      USB_CDC_SUBCLASS_ETHERNET,
 				      USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* Cinterion PLS8 modem by GEMALTO */
+	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
 }, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			   USB_CDC_PROTO_NONE),

View File

@@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 
 		tx_data += len;
 		agg->skb_len += len;
-		agg->skb_num++;
+		agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
 
 		dev_kfree_skb_any(skb);

View File

@@ -504,6 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	page_off += *len;
 
 	while (--*num_buf) {
+		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		unsigned int buflen;
 		void *buf;
 		int off;
@@ -518,7 +519,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 		/* guard against a misconfigured or uncooperative backend that
 		 * is sending packet larger than the MTU.
 		 */
-		if ((page_off + buflen) > PAGE_SIZE) {
+		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
 			put_page(p);
 			goto err_buf;
 		}
@@ -690,6 +691,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	unsigned int truesize;
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
 	bool sent;
+	int err;
 
 	head_skb = NULL;
 
@@ -701,7 +703,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		void *data;
 		u32 act;
 
-		/* This happens when rx buffer size is underestimated */
+		/* This happens when rx buffer size is underestimated
+		 * or headroom is not enough because of the buffer
+		 * was refilled before XDP is set. This should only
+		 * happen for the first several packets, so we don't
+		 * care much about its performance.
+		 */
 		if (unlikely(num_buf > 1 ||
 			     headroom < virtnet_get_headroom(vi))) {
 			/* linearize data for XDP */
@@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
-		if (act != XDP_PASS)
-			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
-
 		switch (act) {
 		case XDP_PASS:
 			/* recalculate offset to account for any header
@@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 				goto err_xdp;
 			rcu_read_unlock();
 			goto xdp_xmit;
+		case XDP_REDIRECT:
+			err = xdp_do_redirect(dev, &xdp, xdp_prog);
+			if (err) {
+				if (unlikely(xdp_page != page))
+					put_page(xdp_page);
+				goto err_xdp;
+			}
+			*xdp_xmit = true;
+			if (unlikely(xdp_page != page))
+				goto err_xdp;
+			rcu_read_unlock();
+			goto xdp_xmit;
 		default:
 			bpf_warn_invalid_xdp_action(act);
 		case XDP_ABORTED:
@@ -1013,13 +1029,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
 }
 
 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
-					  struct ewma_pkt_len *avg_pkt_len)
+					  struct ewma_pkt_len *avg_pkt_len,
+					  unsigned int room)
 {
 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	unsigned int len;
 
+	if (room)
+		return PAGE_SIZE - room;
+
 	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
 				rq->min_buf_len, PAGE_SIZE - hdr_len);
+
 	return ALIGN(len, L1_CACHE_BYTES);
 }
 
@@ -1028,21 +1049,27 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 {
 	struct page_frag *alloc_frag = &rq->alloc_frag;
 	unsigned int headroom = virtnet_get_headroom(vi);
+	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
 	char *buf;
 	void *ctx;
 	int err;
 	unsigned int len, hole;
 
-	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len);
-	if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp)))
+	/* Extra tailroom is needed to satisfy XDP's assumption. This
+	 * means rx frags coalescing won't work, but consider we've
+	 * disabled GSO for XDP, it won't be a big issue.
+	 */
+	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
+	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
 		return -ENOMEM;
 
 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
 	buf += headroom; /* advance address leaving hole at front of pkt */
 	get_page(alloc_frag->page);
-	alloc_frag->offset += len + headroom;
+	alloc_frag->offset += len + room;
 	hole = alloc_frag->size - alloc_frag->offset;
-	if (hole < len + headroom) {
+	if (hole < len + room) {
 		/* To avoid internal fragmentation, if there is very likely not
 		 * enough space for another buffer, add the remaining space to
 		 * the current buffer.
@@ -2185,6 +2212,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	}
 
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
-	for (i = 0; i < vi->max_queue_pairs; i++)
-		napi_disable(&vi->rq[i].napi);
+	if (netif_running(dev))
+		for (i = 0; i < vi->max_queue_pairs; i++)
+			napi_disable(&vi->rq[i].napi);
 
@@ -2205,6 +2233,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		}
 		if (old_prog)
 			bpf_prog_put(old_prog);
-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+		if (netif_running(dev))
+			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
 	}
 
@@ -2576,12 +2605,15 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
 {
 	struct virtnet_info *vi = netdev_priv(queue->dev);
 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
+	unsigned int headroom = virtnet_get_headroom(vi);
+	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	struct ewma_pkt_len *avg;
 
 	BUG_ON(queue_index >= vi->max_queue_pairs);
 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
 	return sprintf(buf, "%u\n",
-		       get_mergeable_buf_len(&vi->rq[queue_index], avg));
+		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
+				       SKB_DATA_ALIGN(headroom + tailroom)));
 }
 
 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =

View File

@@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t)
 			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
 				     0, NULL);
 			proto->restart_counter--;
-		} else
+		} else if (netif_carrier_ok(proto->dev))
+			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+				     0, NULL);
+		else
 			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
 				     0, NULL);
 		break;

View File

@@ -2134,24 +2134,25 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 	}
 	reply->callback = reply_cb;
 	reply->param = reply_param;
-	if (card->state == CARD_STATE_DOWN)
-		reply->seqno = QETH_IDX_COMMAND_SEQNO;
-	else
-		reply->seqno = card->seqno.ipa++;
+
 	init_waitqueue_head(&reply->wait_q);
-	spin_lock_irqsave(&card->lock, flags);
-	list_add_tail(&reply->list, &card->cmd_waiter_list);
-	spin_unlock_irqrestore(&card->lock, flags);
 
 	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
-	qeth_prepare_control_data(card, len, iob);
 
 	if (IS_IPA(iob->data)) {
 		cmd = __ipa_cmd(iob);
+		cmd->hdr.seqno = card->seqno.ipa++;
+		reply->seqno = cmd->hdr.seqno;
 		event_timeout = QETH_IPA_TIMEOUT;
 	} else {
+		reply->seqno = QETH_IDX_COMMAND_SEQNO;
 		event_timeout = QETH_TIMEOUT;
 	}
+	qeth_prepare_control_data(card, len, iob);
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->cmd_waiter_list);
+	spin_unlock_irqrestore(&card->lock, flags);
 
 	timeout = jiffies + event_timeout;
 
@@ -2933,7 +2934,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
 	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
 	cmd->hdr.command = command;
 	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
-	cmd->hdr.seqno = card->seqno.ipa;
+	/* cmd->hdr.seqno is set by qeth_send_control_data() */
 	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
 	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
 	if (card->options.layer2)
@@ -3898,10 +3899,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 int qeth_get_elements_no(struct qeth_card *card,
 		     struct sk_buff *skb, int extra_elems, int data_offset)
 {
-	int elements = qeth_get_elements_for_range(
-				(addr_t)skb->data + data_offset,
-				(addr_t)skb->data + skb_headlen(skb)) +
-			qeth_get_elements_for_frags(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)skb->data + data_offset;
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
 
 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "

View File

@@ -40,8 +40,40 @@ struct qeth_ipaddr {
 			unsigned int pfxlen;
 		} a6;
 	} u;
 };
 
+static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
+					 struct qeth_ipaddr *a2)
+{
+	if (a1->proto != a2->proto)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
+	return a1->u.a4.addr == a2->u.a4.addr;
+}
+
+static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
+					  struct qeth_ipaddr *a2)
+{
+	/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
+	 * so 'proto' and 'addr' match for sure.
+	 *
+	 * For ucast:
+	 * - 'mac' is always 0.
+	 * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
+	 *   values are required to avoid mixups in takeover eligibility.
+	 *
+	 * For mcast,
+	 * - 'mac' is mapped from the IP, and thus always matches.
+	 * - 'mask'/'pfxlen' is always 0.
+	 */
+	if (a1->type != a2->type)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
+	return a1->u.a4.mask == a2->u.a4.mask;
+}
+
 static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
 {
 	u64 ret = 0;

View File

@@ -67,6 +67,24 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
 		qeth_l3_ipaddr6_to_string(addr, buf);
 }

+static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
+						   struct qeth_ipaddr *query)
+{
+	u64 key = qeth_l3_ipaddr_hash(query);
+	struct qeth_ipaddr *addr;
+
+	if (query->is_multicast) {
+		hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	} else {
+		hash_for_each_possible(card->ip_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	}
+
+	return NULL;
+}
+
 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 {
 	int i, j;
@@ -120,34 +138,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
 	return rc;
 }

-inline int
-qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
-{
-	return addr1->proto == addr2->proto &&
-	       !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
-	       ether_addr_equal_64bits(addr1->mac, addr2->mac);
-}
-
-static struct qeth_ipaddr *
-qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
-{
-	struct qeth_ipaddr *addr;
-
-	if (tmp_addr->is_multicast) {
-		hash_for_each_possible(card->ip_mc_htable, addr,
-				       hnode, qeth_l3_ipaddr_hash(tmp_addr))
-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-				return addr;
-	} else {
-		hash_for_each_possible(card->ip_htable, addr,
-				       hnode, qeth_l3_ipaddr_hash(tmp_addr))
-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-				return addr;
-	}
-
-	return NULL;
-}
-
 int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
 	int rc = 0;
@@ -162,22 +152,17 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}

-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr)
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
 		return -ENOENT;

 	addr->ref_counter--;
-	if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
-				      addr->type == QETH_IP_TYPE_RXIP))
+	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
 		return rc;
 	if (addr->in_progress)
 		return -EINPROGRESS;

-	if (!qeth_card_hw_is_reachable(card)) {
-		addr->disp_flag = QETH_DISP_ADDR_DELETE;
-		return 0;
-	}
-
-	rc = qeth_l3_deregister_addr_entry(card, addr);
+	if (qeth_card_hw_is_reachable(card))
+		rc = qeth_l3_deregister_addr_entry(card, addr);

 	hash_del(&addr->hnode);
@@ -190,6 +175,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
 	int rc = 0;
 	struct qeth_ipaddr *addr;
+	char buf[40];

 	QETH_CARD_TEXT(card, 4, "addip");
@@ -200,8 +186,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}

-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr) {
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (addr) {
+		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
+			return -EADDRINUSE;
+		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
+			addr->ref_counter++;
+			return 0;
+		}
+		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
+					 buf);
+		dev_warn(&card->gdev->dev,
+			 "Registering IP address %s failed\n", buf);
+		return -EADDRINUSE;
+	} else {
 		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
 		if (!addr)
 			return -ENOMEM;
@@ -241,19 +239,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 			    (rc == IPA_RC_LAN_OFFLINE)) {
 				addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 				if (addr->ref_counter < 1) {
-					qeth_l3_delete_ip(card, addr);
+					qeth_l3_deregister_addr_entry(card, addr);
+					hash_del(&addr->hnode);
 					kfree(addr);
 				}
 			} else {
 				hash_del(&addr->hnode);
 				kfree(addr);
 			}
-	} else {
-		if (addr->type == QETH_IP_TYPE_NORMAL ||
-		    addr->type == QETH_IP_TYPE_RXIP)
-			addr->ref_counter++;
 	}

 	return rc;
 }
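A compact userspace model of the resulting add semantics — duplicate non-NORMAL registrations are refused, and only an exact identity match bumps the refcount (stand-in types, illustration only):

#include <stdio.h>

enum type { TYPE_NORMAL, TYPE_VIPA, TYPE_RXIP };

struct entry {
	enum type type;
	unsigned int ip;
	int refs;
};

/* Model of the add path: -1 plays the role of -EADDRINUSE. */
static int add_ip(struct entry *existing, enum type type, unsigned int ip)
{
	if (existing && existing->ip == ip) {
		if (type != TYPE_NORMAL)
			return -1;        /* VIPA/RXIP: one owner only */
		if (existing->type == type) {
			existing->refs++; /* full match: share the entry */
			return 0;
		}
		return -1;                /* same IP, different identity */
	}
	return 0;                         /* would insert a new entry */
}

int main(void)
{
	struct entry e = { TYPE_NORMAL, 0x0a000001, 1 };

	printf("%d\n", add_ip(&e, TYPE_NORMAL, 0x0a000001)); /* 0, refs=2 */
	printf("%d\n", add_ip(&e, TYPE_RXIP, 0x0a000001));   /* -1 */
	printf("refs=%d\n", e.refs);
	return 0;
}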
@@ -321,11 +315,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
 	spin_lock_bh(&card->ip_lock);

 	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-			qeth_l3_deregister_addr_entry(card, addr);
-			hash_del(&addr->hnode);
-			kfree(addr);
-		} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
 			if (addr->proto == QETH_PROT_IPV4) {
 				addr->in_progress = 1;
 				spin_unlock_bh(&card->ip_lock);
@@ -643,12 +633,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;

 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		rc = qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);

 	kfree(ipaddr);
@@ -713,12 +698,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;

 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		rc = qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);

 	kfree(ipaddr);
@@ -1239,8 +1219,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 		tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
 		tmp->is_multicast = 1;

-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 		} else {
 			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1319,8 +1300,9 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
 		       sizeof(struct in6_addr));
 		tmp->is_multicast = 1;

-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			continue;
 		}
@@ -2450,11 +2432,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
 			struct sk_buff *skb, int extra_elems)
 {
-	addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
-	int elements = qeth_get_elements_for_range(
-				tcpdptr,
-				(addr_t)skb->data + skb_headlen(skb)) +
-			qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);

 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2,

@@ -924,6 +924,7 @@ void phy_device_remove(struct phy_device *phydev);
 int phy_init_hw(struct phy_device *phydev);
 int phy_suspend(struct phy_device *phydev);
 int phy_resume(struct phy_device *phydev);
+int __phy_resume(struct phy_device *phydev);
 int phy_loopback(struct phy_device *phydev, bool enable);
 struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
 			      phy_interface_t interface);


@@ -3285,8 +3285,7 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
@@ -4104,38 +4103,6 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
 	return !skb->head_frag || skb_cloned(skb);
 }

-/**
- * skb_gso_network_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_network_seglen is used to determine the real size of the
- * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
- *
- * The MAC/L2 header is not accounted for.
- */
-static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
-{
-	unsigned int hdr_len = skb_transport_header(skb) -
-			       skb_network_header(skb);
-
-	return hdr_len + skb_gso_transport_seglen(skb);
-}
-
-/**
- * skb_gso_mac_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_mac_seglen is used to determine the real size of the
- * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
- * headers (TCP/UDP).
- */
-static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
-{
-	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
-
-	return hdr_len + skb_gso_transport_seglen(skb);
-}
-
 /* Local Checksum Offload.
  * Compute outer checksum based on the assumption that the
  * inner checksum will be offloaded later.


@@ -257,6 +257,18 @@ struct devlink_resource_size_params {
 	enum devlink_resource_unit unit;
 };

+static inline void
+devlink_resource_size_params_init(struct devlink_resource_size_params *size_params,
+				  u64 size_min, u64 size_max,
+				  u64 size_granularity,
+				  enum devlink_resource_unit unit)
+{
+	size_params->size_min = size_min;
+	size_params->size_max = size_max;
+	size_params->size_granularity = size_granularity;
+	size_params->unit = unit;
+}
+
 /**
  * struct devlink_resource - devlink resource
  * @name: name of the resource
@@ -278,7 +290,7 @@ struct devlink_resource {
 	u64 size_new;
 	bool size_valid;
 	struct devlink_resource *parent;
-	struct devlink_resource_size_params *size_params;
+	struct devlink_resource_size_params size_params;
 	struct list_head list;
 	struct list_head resource_list;
 	const struct devlink_resource_ops *resource_ops;
@@ -402,7 +414,7 @@ int devlink_resource_register(struct devlink *devlink,
 			      u64 resource_size,
 			      u64 resource_id,
 			      u64 parent_resource_id,
-			      struct devlink_resource_size_params *size_params,
+			      const struct devlink_resource_size_params *size_params,
 			      const struct devlink_resource_ops *resource_ops);
 void devlink_resources_unregister(struct devlink *devlink,
 				  struct devlink_resource *resource);
@@ -556,7 +568,7 @@ devlink_resource_register(struct devlink *devlink,
 			  u64 resource_size,
 			  u64 resource_id,
 			  u64 parent_resource_id,
-			  struct devlink_resource_size_params *size_params,
+			  const struct devlink_resource_size_params *size_params,
 			  const struct devlink_resource_ops *resource_ops)
 {
 	return 0;
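Since the core now copies the parameters into the resource (see the devlink.c memcpy later in this series), a caller can fill a stack-local struct via the new helper and let it go out of scope after registration. A hedged sketch of a driver registration path — the resource name, ID, and sizes below are invented for illustration, not taken from the patch:

/* Sketch only; assumes the devlink API declared above. */
#define EXAMPLE_RES_ID 1

static int example_register_resource(struct devlink *devlink)
{
	/* Stack-local is now safe: the core memcpy()s size_params. */
	struct devlink_resource_size_params size_params;

	devlink_resource_size_params_init(&size_params,
					  0,		/* size_min */
					  1 << 20,	/* size_max */
					  1 << 10,	/* size_granularity */
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, "example", 1 << 20,
					 EXAMPLE_RES_ID,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &size_params, NULL);
}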


@@ -1356,6 +1356,13 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 	return reg->type == PTR_TO_CTX;
 }

+static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+	return type_is_pkt_pointer(reg->type);
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *reg,
 				   int off, int size, bool strict)
@@ -1416,10 +1423,10 @@ static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
 }

 static int check_ptr_alignment(struct bpf_verifier_env *env,
-			       const struct bpf_reg_state *reg,
-			       int off, int size)
+			       const struct bpf_reg_state *reg, int off,
+			       int size, bool strict_alignment_once)
 {
-	bool strict = env->strict_alignment;
+	bool strict = env->strict_alignment || strict_alignment_once;
 	const char *pointer_desc = "";

 	switch (reg->type) {
@@ -1576,9 +1583,9 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
  * if t==write && value_regno==-1, some unknown value is stored into memory
  * if t==read && value_regno==-1, don't care what we read from memory
  */
-static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
-			    int bpf_size, enum bpf_access_type t,
-			    int value_regno)
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
+			    int off, int bpf_size, enum bpf_access_type t,
+			    int value_regno, bool strict_alignment_once)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = regs + regno;
@@ -1590,7 +1597,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		return size;

 	/* alignment checks will add in reg->off themselves */
-	err = check_ptr_alignment(env, reg, off, size);
+	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
 	if (err)
 		return err;

@@ -1735,21 +1742,23 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 		return -EACCES;
 	}

-	if (is_ctx_reg(env, insn->dst_reg)) {
-		verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
-			insn->dst_reg);
+	if (is_ctx_reg(env, insn->dst_reg) ||
+	    is_pkt_reg(env, insn->dst_reg)) {
+		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+			insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
+			"context" : "packet");
 		return -EACCES;
 	}

 	/* check whether atomic_add can read the memory */
 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-			       BPF_SIZE(insn->code), BPF_READ, -1);
+			       BPF_SIZE(insn->code), BPF_READ, -1, true);
 	if (err)
 		return err;

 	/* check whether atomic_add can write into the same memory */
 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-				BPF_SIZE(insn->code), BPF_WRITE, -1);
+				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }

 /* when register 'regno' is passed into function that will read 'access_size'
@@ -2388,7 +2397,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 	 * is inferred from register state.
 	 */
 	for (i = 0; i < meta.access_size; i++) {
-		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
+		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+				       BPF_WRITE, -1, false);
 		if (err)
 			return err;
 	}
@@ -4632,7 +4642,7 @@ static int do_check(struct bpf_verifier_env *env)
 			 */
 			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_READ,
-					       insn->dst_reg);
+					       insn->dst_reg, false);
 			if (err)
 				return err;

@@ -4684,7 +4694,7 @@ static int do_check(struct bpf_verifier_env *env)
 			/* check that memory (dst_reg + off) is writeable */
 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
-					       insn->src_reg);
+					       insn->src_reg, false);
 			if (err)
 				return err;

@@ -4719,7 +4729,7 @@ static int do_check(struct bpf_verifier_env *env)
 			/* check that memory (dst_reg + off) is writeable */
 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
-					       -1);
+					       -1, false);
 			if (err)
 				return err;
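The point of strict_alignment_once: BPF_XADD must be naturally aligned even on architectures that tolerate unaligned plain loads and stores, because the JIT may emit an atomic instruction that faults otherwise. A small userspace model of the natural-alignment test the verifier applies (simplified; the real check also folds in the register's variable offset):

#include <stdbool.h>
#include <stdio.h>

/* Simplified model: an access of 'size' bytes is accepted for BPF_XADD
 * only if the effective offset is a multiple of the access size.
 */
static bool xadd_alignment_ok(int reg_off, int insn_off, int size)
{
	return ((reg_off + insn_off) % size) == 0;
}

int main(void)
{
	printf("%d\n", xadd_alignment_ok(0, 8, 4)); /* 1: 4-byte add at +8 */
	printf("%d\n", xadd_alignment_ok(0, 6, 4)); /* 0: rejected; would trap on some CPUs */
	return 0;
}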


@@ -24,10 +24,11 @@
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>

 /* General test specific settings */
 #define MAX_SUBTESTS	3
-#define MAX_TESTRUNS	10000
+#define MAX_TESTRUNS	1000
 #define MAX_DATA	128
 #define MAX_INSNS	512
 #define MAX_K		0xffffFFFF
@@ -6582,6 +6583,7 @@ static __init int test_bpf(void)
 		struct bpf_prog *fp;
 		int err;

+		cond_resched();
 		if (exclude_test(i))
 			continue;


@@ -157,7 +157,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
  * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
-				     int max_if_num)
+				     unsigned int max_if_num)
 {
 	void *data_ptr;
 	size_t old_size;
@@ -201,7 +201,8 @@ static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
  */
 static void
 batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
-				   int max_if_num, int del_if_num)
+				   unsigned int max_if_num,
+				   unsigned int del_if_num)
 {
 	size_t chunk_size;
 	size_t if_offset;
@@ -239,7 +240,8 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
  */
 static void
 batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
-				       int max_if_num, int del_if_num)
+				       unsigned int max_if_num,
+				       unsigned int del_if_num)
 {
 	size_t if_offset;
 	void *data_ptr;
@@ -276,7 +278,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
  * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
-				     int max_if_num, int del_if_num)
+				     unsigned int max_if_num,
+				     unsigned int del_if_num)
 {
 	spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
@@ -311,7 +314,8 @@ static struct batadv_orig_node *
 batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
 {
 	struct batadv_orig_node *orig_node;
-	int size, hash_added;
+	int hash_added;
+	size_t size;

 	orig_node = batadv_orig_hash_find(bat_priv, addr);
 	if (orig_node)
@@ -893,7 +897,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
 	u32 i;
 	size_t word_index;
 	u8 *w;
-	int if_num;
+	unsigned int if_num;

 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1023,7 +1027,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
 	struct batadv_neigh_node *tmp_neigh_node = NULL;
 	struct batadv_neigh_node *router = NULL;
 	struct batadv_orig_node *orig_node_tmp;
-	int if_num;
+	unsigned int if_num;
 	u8 sum_orig, sum_neigh;
 	u8 *neigh_addr;
 	u8 tq_avg;
@@ -1182,7 +1186,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 	u8 total_count;
 	u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
 	unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
-	int if_num;
+	unsigned int if_num;
 	unsigned int tq_asym_penalty, inv_asym_penalty;
 	unsigned int combined_tq;
 	unsigned int tq_iface_penalty;
@@ -1702,9 +1706,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,

 	if (is_my_orig) {
 		unsigned long *word;
-		int offset;
+		size_t offset;
 		s32 bit_pos;
-		s16 if_num;
+		unsigned int if_num;
 		u8 *weight;

 		orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
@@ -2729,7 +2733,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
 	struct batadv_gw_node *curr_gw;
-	int ret = -EINVAL;
+	int ret = 0;
 	void *hdr;

 	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);


@@ -928,7 +928,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
 	struct batadv_gw_node *curr_gw;
-	int ret = -EINVAL;
+	int ret = 0;
 	void *hdr;

 	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);


@@ -2161,22 +2161,25 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 {
 	struct batadv_bla_claim *claim;
 	int idx = 0;
+	int ret = 0;

 	rcu_read_lock();
 	hlist_for_each_entry_rcu(claim, head, hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
-		if (batadv_bla_claim_dump_entry(msg, portid, seq,
-						primary_if, claim)) {
+
+		ret = batadv_bla_claim_dump_entry(msg, portid, seq,
+						  primary_if, claim);
+		if (ret) {
 			*idx_skip = idx - 1;
 			goto unlock;
 		}
 	}

-	*idx_skip = idx;
+	*idx_skip = 0;
 unlock:
 	rcu_read_unlock();
-	return 0;
+	return ret;
 }

 /**
@@ -2391,22 +2394,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	int idx = 0;
+	int ret = 0;

 	rcu_read_lock();
 	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
-		if (batadv_bla_backbone_dump_entry(msg, portid, seq,
-						   primary_if, backbone_gw)) {
+
+		ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+						     primary_if, backbone_gw);
+		if (ret) {
 			*idx_skip = idx - 1;
 			goto unlock;
 		}
 	}

-	*idx_skip = idx;
+	*idx_skip = 0;
 unlock:
 	rcu_read_unlock();
-	return 0;
+	return ret;
 }

 /**


@@ -288,7 +288,8 @@ batadv_frag_merge_packets(struct hlist_head *chain)
 	/* Move the existing MAC header to just before the payload. (Override
 	 * the fragment header.)
 	 */
-	skb_pull_rcsum(skb_out, hdr_size);
+	skb_pull(skb_out, hdr_size);
+	skb_out->ip_summed = CHECKSUM_NONE;
 	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
 	skb_set_mac_header(skb_out, -ETH_HLEN);
 	skb_reset_network_header(skb_out);


@@ -763,6 +763,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 	hard_iface->soft_iface = soft_iface;
 	bat_priv = netdev_priv(hard_iface->soft_iface);

+	if (bat_priv->num_ifaces >= UINT_MAX) {
+		ret = -ENOSPC;
+		goto err_dev;
+	}
+
 	ret = netdev_master_upper_dev_link(hard_iface->net_dev,
 					   soft_iface, NULL, NULL, NULL);
 	if (ret)
@@ -876,7 +881,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
 	batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);

 	/* nobody uses this interface anymore */
-	if (!bat_priv->num_ifaces) {
+	if (bat_priv->num_ifaces == 0) {
 		batadv_gw_check_client_stop(bat_priv);

 		if (autodel == BATADV_IF_CLEANUP_AUTO)
@@ -912,7 +917,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
 	if (ret)
 		goto free_if;

-	hard_iface->if_num = -1;
+	hard_iface->if_num = 0;
 	hard_iface->net_dev = net_dev;
 	hard_iface->soft_iface = NULL;
 	hard_iface->if_status = BATADV_IF_NOT_IN_USE;
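The wider theme of this batman-adv series: num_ifaces used to be a plain char and if_num an s16, both of which can overflow (or go negative) once enough hard interfaces are attached, so the counter and every index move to unsigned int with an explicit UINT_MAX guard before incrementing. A tiny userspace demonstration of the failure mode being closed:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	signed char narrow = CHAR_MAX; /* stands in for the old 'char num_ifaces' */
	unsigned int wide = CHAR_MAX;

	narrow++; /* implementation-defined wrap, commonly to -128 */
	wide++;   /* well-defined: 128 */

	printf("narrow=%d wide=%u\n", narrow, wide);
	return 0;
}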


@@ -1569,7 +1569,7 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
  * Return: 0 on success or negative error number in case of failure
  */
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
-			    int max_if_num)
+			    unsigned int max_if_num)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_algo_ops *bao = bat_priv->algo_ops;
@@ -1611,7 +1611,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
  * Return: 0 on success or negative error number in case of failure
  */
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
-			    int max_if_num)
+			    unsigned int max_if_num)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;


@@ -73,9 +73,9 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
-			    int max_if_num);
+			    unsigned int max_if_num);
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
-			    int max_if_num);
+			    unsigned int max_if_num);
 struct batadv_orig_node_vlan *
 batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
 			  unsigned short vid);


@@ -459,13 +459,7 @@ void batadv_interface_rx(struct net_device *soft_iface,

 	/* skb->dev & skb->pkt_type are set here */
 	skb->protocol = eth_type_trans(skb, soft_iface);
-
-	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
-	/* should not be necessary anymore as we use skb_pull_rcsum()
-	 * TODO: please verify this and remove this TODO
-	 * -- Dec 21st 2009, Simon Wunderlich
-	 */
-
-	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

 	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
 	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,


@@ -167,7 +167,7 @@ struct batadv_hard_iface {
 	struct list_head list;

 	/** @if_num: identificator of the interface */
-	s16 if_num;
+	unsigned int if_num;

 	/** @if_status: status of the interface for batman-adv */
 	char if_status;
@@ -1596,7 +1596,7 @@ struct batadv_priv {
 	atomic_t batman_queue_left;

 	/** @num_ifaces: number of interfaces assigned to this mesh interface */
-	char num_ifaces;
+	unsigned int num_ifaces;

 	/** @mesh_obj: kobject for sysfs mesh subdirectory */
 	struct kobject *mesh_obj;
@@ -2186,15 +2186,16 @@ struct batadv_algo_orig_ops {
 	 * orig_node due to a new hard-interface being added into the mesh
 	 * (optional)
 	 */
-	int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+	int (*add_if)(struct batadv_orig_node *orig_node,
+		      unsigned int max_if_num);

 	/**
 	 * @del_if: ask the routing algorithm to apply the needed changes to the
 	 * orig_node due to an hard-interface being removed from the mesh
 	 * (optional)
 	 */
-	int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
-		      int del_if_num);
+	int (*del_if)(struct batadv_orig_node *orig_node,
+		      unsigned int max_if_num, unsigned int del_if_num);

 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 	/** @print: print the originator table (optional) */


@@ -214,7 +214,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)

 	iph = ip_hdr(skb);
 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
-		goto inhdr_error;
+		goto csum_error;

 	len = ntohs(iph->tot_len);
 	if (skb->len < len) {
@@ -236,6 +236,8 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 	 */
 	return 0;

+csum_error:
+	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
 inhdr_error:
 	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:


@@ -168,6 +168,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
 		masterv = br_vlan_find(vg, vid);
 		if (WARN_ON(!masterv))
 			return NULL;
+		refcount_set(&masterv->refcnt, 1);
+		return masterv;
 	}
 	refcount_inc(&masterv->refcnt);


@@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	return true;
 }

+static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
+{
+	return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
+}
+
 static int ebt_among_mt_check(const struct xt_mtchk_param *par)
 {
 	const struct ebt_among_info *info = par->matchinfo;
 	const struct ebt_entry_match *em =
 		container_of(par->matchinfo, const struct ebt_entry_match, data);
-	int expected_length = sizeof(struct ebt_among_info);
+	unsigned int expected_length = sizeof(struct ebt_among_info);
 	const struct ebt_mac_wormhash *wh_dst, *wh_src;
 	int err;

+	if (expected_length > em->match_size)
+		return -EINVAL;
+
 	wh_dst = ebt_among_wh_dst(info);
-	wh_src = ebt_among_wh_src(info);
+	if (poolsize_invalid(wh_dst))
+		return -EINVAL;
+
 	expected_length += ebt_mac_wormhash_size(wh_dst);
+	if (expected_length > em->match_size)
+		return -EINVAL;
+
+	wh_src = ebt_among_wh_src(info);
+	if (poolsize_invalid(wh_src))
+		return -EINVAL;
+
 	expected_length += ebt_mac_wormhash_size(wh_src);

 	if (em->match_size != EBT_ALIGN(expected_length)) {
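The poolsize bound matters because the wormhash size is poolsize multiplied by the per-tuple size; without the INT_MAX / sizeof(tuple) ceiling, a crafted poolsize overflows the computed length and defeats the match_size validation below. A userspace sketch of the guarded computation (the tuple size is a stand-in, illustration only):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define TUPLE_SIZE 8 /* stand-in for sizeof(struct ebt_mac_wormhash_tuple) */

static bool poolsize_invalid(unsigned int poolsize)
{
	return poolsize >= (INT_MAX / TUPLE_SIZE);
}

static int wormhash_size(unsigned int poolsize)
{
	/* Only safe to evaluate after the bound check above. */
	return (int)(poolsize * TUPLE_SIZE);
}

int main(void)
{
	unsigned int crafted = 0x20000000; /* would overflow a 32-bit length */

	if (poolsize_invalid(crafted))
		printf("rejected before the multiply can overflow\n");
	else
		printf("size=%d\n", wormhash_size(crafted));
	return 0;
}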


@@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
 	int off = ebt_compat_match_offset(match, m->match_size);
 	compat_uint_t msize = m->match_size - off;

-	BUG_ON(off >= m->match_size);
+	if (WARN_ON(off >= m->match_size))
+		return -EINVAL;

 	if (copy_to_user(cm->u.name, match->name,
 			 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
@@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
 	int off = xt_compat_target_offset(target);
 	compat_uint_t tsize = t->target_size - off;

-	BUG_ON(off >= t->target_size);
+	if (WARN_ON(off >= t->target_size))
+		return -EINVAL;

 	if (copy_to_user(cm->u.name, target->name,
 			 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
@@ -1902,7 +1904,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state,
 	if (state->buf_kern_start == NULL)
 		goto count_only;

-	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
+	if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
+		return -EINVAL;

 	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);

@@ -1915,7 +1918,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
 {
 	char *b = state->buf_kern_start;

-	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
+	if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
+		return -EINVAL;

 	if (b != NULL && sz > 0)
 		memset(b + state->buf_kern_offset, 0, sz);
@@ -1992,8 +1996,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,

 	pad = XT_ALIGN(size_kern) - size_kern;
 	if (pad > 0 && dst) {
-		BUG_ON(state->buf_kern_len <= pad);
-		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
+		if (WARN_ON(state->buf_kern_len <= pad))
+			return -EINVAL;
+		if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
+			return -EINVAL;
 		memset(dst + size_kern, 0, pad);
 	}
 	return off + match_size;
@@ -2043,7 +2049,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
 		if (ret < 0)
 			return ret;

-		BUG_ON(ret < match32->match_size);
+		if (WARN_ON(ret < match32->match_size))
+			return -EINVAL;
 		growth += ret - match32->match_size;
 		growth += ebt_compat_entry_padsize();

@@ -2053,7 +2060,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
 		if (match_kern)
 			match_kern->match_size = ret;

-		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
+		if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+			return -EINVAL;
+
 		match32 = (struct compat_ebt_entry_mwt *) buf;
 	}

@@ -2109,6 +2118,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
 	 *
 	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
 	 */
+	for (i = 0; i < 4 ; ++i) {
+		if (offsets[i] >= *total)
+			return -EINVAL;
+		if (i == 0)
+			continue;
+		if (offsets[i-1] > offsets[i])
+			return -EINVAL;
+	}
+
 	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
 		struct compat_ebt_entry_mwt *match32;
 		unsigned int size;
@@ -2140,7 +2158,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,

 	startoff = state->buf_user_offset - startoff;

-	BUG_ON(*total < startoff);
+	if (WARN_ON(*total < startoff))
+		return -EINVAL;
 	*total -= startoff;
 	return 0;
 }
@@ -2267,7 +2286,8 @@ static int compat_do_replace(struct net *net, void __user *user,
 	state.buf_kern_len = size64;

 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-	BUG_ON(ret < 0); /* parses same data again */
+	if (WARN_ON(ret < 0))
+		goto out_unlock;

 	vfree(entries_tmp);
 	tmp.entries_size = size64;


@@ -6396,6 +6396,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 		.linking = true,
 		.upper_info = upper_info,
 	};
+	struct net_device *master_dev;
 	int ret = 0;

 	ASSERT_RTNL();
@@ -6407,11 +6408,14 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 	if (netdev_has_upper_dev(upper_dev, dev))
 		return -EBUSY;

-	if (netdev_has_upper_dev(dev, upper_dev))
-		return -EEXIST;
-
-	if (master && netdev_master_upper_dev_get(dev))
-		return -EBUSY;
+	if (!master) {
+		if (netdev_has_upper_dev(dev, upper_dev))
+			return -EEXIST;
+	} else {
+		master_dev = netdev_master_upper_dev_get(dev);
+		if (master_dev)
+			return master_dev == upper_dev ? -EEXIST : -EBUSY;
+	}

 	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
 					    &changeupper_info.info);
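The distinction matters to callers that retry: re-linking the same master now reports -EEXIST (already done) instead of -EBUSY (a different master holds the device). A toy model of the decision (illustration only):

#include <stdio.h>
#include <string.h>

#define EEXIST 17
#define EBUSY  16

/* Toy model: 'current_master' is the device's master, or NULL. */
static int link_master(const char *current_master, const char *upper)
{
	if (current_master)
		return strcmp(current_master, upper) == 0 ? -EEXIST : -EBUSY;
	return 0; /* proceed with linking */
}

int main(void)
{
	printf("%d\n", link_master(NULL, "bond0"));    /*   0: link proceeds */
	printf("%d\n", link_master("bond0", "bond0")); /* -17: same master */
	printf("%d\n", link_master("bond1", "bond0")); /* -16: other master */
	return 0;
}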


@@ -1695,10 +1695,11 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
 		goto nla_put_failure;

 	if (table->resource_valid) {
-		nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
-				  table->resource_id, DEVLINK_ATTR_PAD);
-		nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
-				  table->resource_units, DEVLINK_ATTR_PAD);
+		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+				      table->resource_id, DEVLINK_ATTR_PAD) ||
+		    nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+				      table->resource_units, DEVLINK_ATTR_PAD))
+			goto nla_put_failure;
 	}
 	if (devlink_dpipe_matches_put(table, skb))
 		goto nla_put_failure;
@@ -2332,7 +2333,7 @@ devlink_resource_validate_children(struct devlink_resource *resource)
 	list_for_each_entry(child_resource, &resource->resource_list, list)
 		parts_size += child_resource->size_new;

-	if (parts_size > resource->size)
+	if (parts_size > resource->size_new)
 		size_valid = false;
 out:
 	resource->size_valid = size_valid;
@@ -2372,20 +2373,22 @@ static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
 	return 0;
 }

-static void
+static int
 devlink_resource_size_params_put(struct devlink_resource *resource,
 				 struct sk_buff *skb)
 {
 	struct devlink_resource_size_params *size_params;

-	size_params = resource->size_params;
-	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
-			  size_params->size_granularity, DEVLINK_ATTR_PAD);
-	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
-			  size_params->size_max, DEVLINK_ATTR_PAD);
-	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
-			  size_params->size_min, DEVLINK_ATTR_PAD);
-	nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit);
+	size_params = &resource->size_params;
+	if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+			      size_params->size_granularity, DEVLINK_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+			      size_params->size_max, DEVLINK_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+			      size_params->size_min, DEVLINK_ATTR_PAD) ||
+	    nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
+		return -EMSGSIZE;
+	return 0;
 }

 static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
@@ -2409,10 +2412,12 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
 	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
 			  resource->size_new, DEVLINK_ATTR_PAD);
 	if (resource->resource_ops && resource->resource_ops->occ_get)
-		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
-				  resource->resource_ops->occ_get(devlink),
-				  DEVLINK_ATTR_PAD);
-	devlink_resource_size_params_put(resource, skb);
+		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+				      resource->resource_ops->occ_get(devlink),
+				      DEVLINK_ATTR_PAD))
+			goto nla_put_failure;
+	if (devlink_resource_size_params_put(resource, skb))
+		goto nla_put_failure;
 	if (list_empty(&resource->resource_list))
 		goto out;
@@ -3151,7 +3156,7 @@ int devlink_resource_register(struct devlink *devlink,
 			      u64 resource_size,
 			      u64 resource_id,
 			      u64 parent_resource_id,
-			      struct devlink_resource_size_params *size_params,
+			      const struct devlink_resource_size_params *size_params,
 			      const struct devlink_resource_ops *resource_ops)
 {
 	struct devlink_resource *resource;
@@ -3194,7 +3199,8 @@ int devlink_resource_register(struct devlink *devlink,
 	resource->id = resource_id;
 	resource->resource_ops = resource_ops;
 	resource->size_valid = true;
-	resource->size_params = size_params;
+	memcpy(&resource->size_params, size_params,
+	       sizeof(resource->size_params));
 	INIT_LIST_HEAD(&resource->resource_list);
 	list_add_tail(&resource->list, resource_list);
 out:
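The recurring fix in this file is one pattern: nla_put_*() returns nonzero when the message buffer runs out of tailroom, and ignoring that silently truncates the netlink payload. A minimal userspace model of the or-chain-and-bail idiom (the fake_nla_put helper is a stand-in, illustration only):

#include <stdio.h>

/* Stand-in for nla_put_*(): nonzero when the buffer is full. */
static int fake_nla_put(int *room, int len)
{
	if (*room < len)
		return -1;
	*room -= len;
	return 0;
}

static int fill_msg(int room)
{
	if (fake_nla_put(&room, 12) || /* attribute 1 */
	    fake_nla_put(&room, 12) || /* attribute 2 */
	    fake_nla_put(&room, 5))    /* attribute 3 */
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -90; /* -EMSGSIZE: caller retries with a bigger buffer */
}

int main(void)
{
	printf("%d\n", fill_msg(64)); /* 0 */
	printf("%d\n", fill_msg(16)); /* -90 instead of a silently short message */
	return 0;
}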


@@ -2520,11 +2520,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
 static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM };
+	int rc;

 	if (!dev->ethtool_ops->get_fecparam)
 		return -EOPNOTSUPP;

-	dev->ethtool_ops->get_fecparam(dev, &fecparam);
+	rc = dev->ethtool_ops->get_fecparam(dev, &fecparam);
+	if (rc)
+		return rc;

 	if (copy_to_user(useraddr, &fecparam, sizeof(fecparam)))
 		return -EFAULT;


@@ -4891,7 +4891,7 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
  *
  * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
  */
-unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 {
 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
 	unsigned int thlen = 0;
@@ -4913,7 +4913,40 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 	 */
 	return thlen + shinfo->gso_size;
 }
-EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+	unsigned int hdr_len = skb_transport_header(skb) -
+			       skb_network_header(skb);
+
+	return hdr_len + skb_gso_transport_seglen(skb);
+}
+
+/**
+ * skb_gso_mac_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_mac_seglen is used to determine the real size of the
+ * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
+ * headers (TCP/UDP).
+ */
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
+{
+	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+	return hdr_len + skb_gso_transport_seglen(skb);
+}

 /**
  * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
@@ -4955,19 +4988,20 @@ static inline bool skb_gso_size_check(const struct sk_buff *skb,
 }

 /**
- * skb_gso_validate_mtu - Return in case such skb fits a given MTU
+ * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
  *
  * @skb: GSO skb
  * @mtu: MTU to validate against
  *
- * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
- * once split.
+ * skb_gso_validate_network_len validates if a given skb will fit a
+ * wanted MTU once split. It considers L3 headers, L4 headers, and the
+ * payload.
  */
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
 {
 	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
 }
-EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
+EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);

 /**
  * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
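The seglen arithmetic these helpers implement is simple: each post-split segment carries the L3/L4 headers plus one gso_size worth of payload. A worked example in plain C (the numbers are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int ip_hdr = 20;     /* IPv4 header */
	unsigned int tcp_hdr = 20;    /* TCP header, no options */
	unsigned int gso_size = 1448; /* payload per segment */

	/* network_seglen = L3 + L4 headers + one segment's payload */
	unsigned int network_seglen = ip_hdr + tcp_hdr + gso_size;

	unsigned int mtu = 1500;
	printf("seglen=%u fits mtu %u: %s\n", network_seglen, mtu,
	       network_seglen <= mtu ? "yes" : "no"); /* 1488 <= 1500 */
	return 0;
}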


@@ -55,7 +55,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->ignore_df)
 		return false;

-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;

 	return true;


@@ -970,9 +970,6 @@ static void __gre_tunnel_init(struct net_device *dev)

 	t_hlen = tunnel->hlen + sizeof(struct iphdr);

-	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
-	dev->mtu = ETH_DATA_LEN - t_hlen - 4;
-
 	dev->features |= GRE_FEATURES;
 	dev->hw_features |= GRE_FEATURES;

@@ -1290,8 +1287,6 @@ static int erspan_tunnel_init(struct net_device *dev)
 		       erspan_hdr_len(tunnel->erspan_ver);
 	t_hlen = tunnel->hlen + sizeof(struct iphdr);

-	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
-	dev->mtu = ETH_DATA_LEN - t_hlen - 4;
-
 	dev->features |= GRE_FEATURES;
 	dev->hw_features |= GRE_FEATURES;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;


@@ -248,7 +248,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,

 	/* common case: seglen is <= mtu
 	 */
-	if (skb_gso_validate_mtu(skb, mtu))
+	if (skb_gso_validate_network_len(skb, mtu))
 		return ip_finish_output2(net, sk, skb);

 	/* Slowpath - GSO segment length exceeds the egress MTU.


@@ -710,16 +710,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		}
 	}

-	if (tunnel->fwmark) {
-		init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
-				 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
-				 tunnel->fwmark);
-	}
-	else {
-		init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
-				 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
-				 skb->mark);
-	}
+	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
+			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
+			 tunnel->fwmark);

 	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
 		goto tx_error;


@@ -232,7 +232,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
 	c->hash_mode = i->hash_mode;
 	c->hash_initval = i->hash_initval;
 	refcount_set(&c->refcount, 1);
-	refcount_set(&c->entries, 1);

 	spin_lock_bh(&cn->lock);
 	if (__clusterip_config_find(net, ip)) {
@@ -263,8 +262,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,

 	c->notifier.notifier_call = clusterip_netdev_event;
 	err = register_netdevice_notifier(&c->notifier);
-	if (!err)
+	if (!err) {
+		refcount_set(&c->entries, 1);
 		return c;
+	}

 #ifdef CONFIG_PROC_FS
 	proc_remove(c->pde);
@@ -273,7 +274,7 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
 	spin_lock_bh(&cn->lock);
 	list_del_rcu(&c->list);
 	spin_unlock_bh(&cn->lock);
-	kfree(c);
+	clusterip_config_put(c);

 	return ERR_PTR(err);
 }
@@ -496,12 +497,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 			return PTR_ERR(config);
 		}
 	}
-	cipinfo->config = config;

 	ret = nf_ct_netns_get(par->net, par->family);
-	if (ret < 0)
+	if (ret < 0) {
 		pr_info("cannot load conntrack support for proto=%u\n",
 			par->family);
+		clusterip_config_entry_put(par->net, config);
+		clusterip_config_put(config);
+		return ret;
+	}

 	if (!par->net->xt.clusterip_deprecated_warning) {
 		pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
@@ -509,6 +513,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 		par->net->xt.clusterip_deprecated_warning = true;
 	}

+	cipinfo->config = config;
 	return ret;
 }


@@ -111,6 +111,7 @@ static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
 	default:
 		return -1;
 	}
+	csum_replace4(&iph->check, addr, new_addr);

 	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
 }
@@ -185,7 +186,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
 		return false;

-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;

 	return true;


@@ -128,10 +128,11 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
 static int ip_rt_error_cost __read_mostly	= HZ;
 static int ip_rt_error_burst __read_mostly	= 5 * HZ;
 static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
-static int ip_rt_min_pmtu __read_mostly	= 512 + 20 + 20;
+static u32 ip_rt_min_pmtu __read_mostly	= 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly	= 256;

 static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+
 /*
  * Interface to generic destination cache.
  */
@@ -930,14 +931,23 @@ void ip_rt_send_redirect(struct sk_buff *skb)

 static int ip_error(struct sk_buff *skb)
 {
-	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
 	struct rtable *rt = skb_rtable(skb);
+	struct net_device *dev = skb->dev;
+	struct in_device *in_dev;
 	struct inet_peer *peer;
 	unsigned long now;
 	struct net *net;
 	bool send;
 	int code;

+	if (netif_is_l3_master(skb->dev)) {
+		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
+		if (!dev)
+			goto out;
+	}
+
+	in_dev = __in_dev_get_rcu(dev);
+
 	/* IP on this device is disabled. */
 	if (!in_dev)
 		goto out;
@@ -2818,6 +2828,7 @@ void ip_rt_multicast_event(struct in_device *in_dev)
 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
 static int ip_rt_gc_elasticity __read_mostly	= 8;
+static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;

 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
 					void __user *buffer,
@@ -2933,7 +2944,8 @@ static struct ctl_table ipv4_route_table[] = {
 		.data		= &ip_rt_min_pmtu,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &ip_min_valid_pmtu,
 	},
 	{
 		.procname	= "min_adv_mss",


@@ -6,7 +6,7 @@
  * The algorithm is described in:
  * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
  *  for High-Speed Networks"
- * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
+ * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
  *
  * Implemented from description in paper and ns-2 simulation.
  * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>


@@ -1971,11 +1971,6 @@ void tcp_enter_loss(struct sock *sk)
 	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
 	 * loss recovery is underway except recurring timeout(s) on
 	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
-	 *
-	 * In theory F-RTO can be used repeatedly during loss recovery.
-	 * In practice this interacts badly with broken middle-boxes that
-	 * falsely raise the receive window, which results in repeated
-	 * timeouts and stop-and-go behavior.
 	 */
 	tp->frto = net->ipv4.sysctl_tcp_frto &&
 		   (new_recovery || icsk->icsk_retransmits) &&
@@ -2631,18 +2626,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
 	    tcp_try_undo_loss(sk, false))
 		return;

-	/* The ACK (s)acks some never-retransmitted data meaning not all
-	 * the data packets before the timeout were lost. Therefore we
-	 * undo the congestion window and state. This is essentially
-	 * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
-	 * a retransmitted skb is permantly marked, we can apply such an
-	 * operation even if F-RTO was not used.
-	 */
-	if ((flag & FLAG_ORIG_SACK_ACKED) &&
-	    tcp_try_undo_loss(sk, tp->undo_marker))
-		return;
-
 	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
+		/* Step 3.b. A timeout is spurious if not all data are
+		 * lost, i.e., never-retransmitted data are (s)acked.
+		 */
+		if ((flag & FLAG_ORIG_SACK_ACKED) &&
+		    tcp_try_undo_loss(sk, true))
+			return;
+
 		if (after(tp->snd_nxt, tp->high_seq)) {
 			if (flag & FLAG_DATA_SACKED || is_dupack)
 				tp->frto = 0; /* Step 3.a. loss was real */
@@ -4001,6 +3992,7 @@ void tcp_reset(struct sock *sk)
 	/* This barrier is coupled with smp_rmb() in tcp_poll() */
 	smp_wmb();

+	tcp_write_queue_purge(sk);
 	tcp_done(sk);

 	if (!sock_flag(sk, SOCK_DEAD))
@@ -30,7 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 	mtu = dst_mtu(skb_dst(skb));
 	if ((!skb_is_gso(skb) && skb->len > mtu) ||
-	    (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) {
+	    (skb_is_gso(skb) &&
+	     !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
 		skb->protocol = htons(ETH_P_IP);

 		if (skb->sk)

@@ -412,7 +412,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->ignore_df)
 		return false;

-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;

 	return true;

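Several hunks in this pull make the same substitution: skb_gso_network_seglen()/skb_gso_validate_mtu() become skb_gso_validate_network_len(), which returns true only when every segment of a GSO skb fits within the given length (and, unlike the old seglen comparison, copes with GSO_BY_FRAGS). A sketch of the resulting predicate shape, assuming only what the hunks above show:

/* Sketch: the common "is this packet too big?" test after the rename. */
static bool pkt_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (!skb_is_gso(skb))
		return skb->len > mtu;

	/* true iff every resulting segment's network-layer length <= mtu */
	return !skb_gso_validate_network_len(skb, mtu);
}
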
@@ -1982,14 +1982,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
 {
 	struct net *net = dev_net(dev);
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-	struct ip6_tnl *nt, *t;
 	struct ip_tunnel_encap ipencap;
+	struct ip6_tnl *nt, *t;
+	int err;

 	nt = netdev_priv(dev);

 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
-		int err = ip6_tnl_encap_setup(nt, &ipencap);
+		err = ip6_tnl_encap_setup(nt, &ipencap);

 		if (err < 0)
 			return err;
 	}

@@ -2005,7 +2005,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
 			return -EEXIST;
 	}

-	return ip6_tnl_create2(dev);
+	err = ip6_tnl_create2(dev);
+	if (!err && tb[IFLA_MTU])
+		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+
+	return err;
 }

 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],

@@ -21,18 +21,19 @@
 int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct sock *sk = sk_to_full_sk(skb->sk);
 	unsigned int hh_len;
 	struct dst_entry *dst;
 	struct flowi6 fl6 = {
-		.flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
+		.flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
 		.flowi6_mark = skb->mark,
-		.flowi6_uid = sock_net_uid(net, skb->sk),
+		.flowi6_uid = sock_net_uid(net, sk),
 		.daddr = iph->daddr,
 		.saddr = iph->saddr,
 	};
 	int err;

-	dst = ip6_route_output(net, skb->sk, &fl6);
+	dst = ip6_route_output(net, sk, &fl6);
 	err = dst->error;
 	if (err) {
 		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);

@@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
 	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
 		skb_dst_set(skb, NULL);
-		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
+		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
 		if (IS_ERR(dst))
 			return PTR_ERR(dst);
 		skb_dst_set(skb, dst);

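Context for the fix above (item 2 in the pull summary): skb->sk can point at a request_sock or timewait socket, which does not carry full-socket fields such as sk_bound_dev_if; sk_to_full_sk() maps it to the corresponding full (listener) socket before any such field is read, mirroring what ip_route_me_harder() already does for IPv4. A minimal sketch of the idiom, with a hypothetical helper name:

/* Sketch: never read full-socket fields off skb->sk directly in an
 * output/reroute path; a request_sock may be attached instead.
 */
static int routing_oif(const struct sk_buff *skb)
{
	struct sock *sk = sk_to_full_sk(skb->sk);	/* may still be NULL */

	return sk ? sk->sk_bound_dev_if : 0;
}
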
@@ -48,10 +48,6 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
 	}

 	fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
-	if ((flags & XT_RPFILTER_LOOSE) == 0) {
-		fl6.flowi6_oif = dev->ifindex;
-		lookup_flags |= RT6_LOOKUP_F_IFACE;
-	}

 	rt = (void *)ip6_route_lookup(net, &fl6, lookup_flags);
 	if (rt->dst.error)

@@ -178,7 +178,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->len <= mtu)
 		return false;

-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;

 	return true;

@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
 		    !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
 					target, maniptype))
 			return false;
+
+		/* must reload, offset might have changed */
+		ipv6h = (void *)skb->data + iphdroff;
+
 	manip_addr:
 		if (maniptype == NF_NAT_MANIP_SRC)
 			ipv6h->saddr = target->src.u3.in6;

@@ -180,7 +180,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 	}

 	*dest = 0;
-again:
 	rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags);
 	if (rt->dst.error)
 		goto put_rt_err;

@@ -189,15 +188,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 	if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
 		goto put_rt_err;

-	if (oif && oif != rt->rt6i_idev->dev) {
-		/* multipath route? Try again with F_IFACE */
-		if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) {
-			lookup_flags |= RT6_LOOKUP_F_IFACE;
-			fl6.flowi6_oif = oif->ifindex;
-			ip6_rt_put(rt);
-			goto again;
-		}
-	}
+	if (oif && oif != rt->rt6i_idev->dev)
+		goto put_rt_err;

 	switch (priv->result) {
 	case NFT_FIB_RESULT_OIF:

@@ -1578,6 +1578,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
 	if (err < 0)
 		return err;

+	if (tb[IFLA_MTU]) {
+		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+
+		if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
+			dev->mtu = mtu;
+	}
+
 #ifdef CONFIG_IPV6_SIT_6RD
 	if (ipip6_netlink_6rd_parms(data, &ip6rd))
 		err = ipip6_tunnel_update_6rd(nt, &ip6rd);

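Both this hunk and the ip6_tnl one above address item 7 in the pull summary: IFLA_MTU was silently ignored on newlink. Here the bounds are applied inline: IPV6_MIN_MTU (1280) is the smallest MTU IPv6 permits, and 0xFFF8 minus the device's header length keeps the encapsulated frame's total length within the 16-bit IP length field. A sketch of the same validation factored out, under the assumption (mine, not the patch's) that one would reuse it:

/* Sketch: validate a user-supplied tunnel MTU before applying it. */
static bool tnl_mtu_valid(const struct net_device *dev, u32 mtu)
{
	return mtu >= IPV6_MIN_MTU &&
	       mtu <= 0xFFF8 - dev->hard_header_len;
}
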
@@ -82,7 +82,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 	if ((!skb_is_gso(skb) && skb->len > mtu) ||
 	    (skb_is_gso(skb) &&
-	     skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) {
+	     !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
 		skb->dev = dst->dev;
 		skb->protocol = htons(ETH_P_IPV6);

@@ -136,51 +136,6 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
 }

-/* Lookup the tunnel socket, possibly involving the fs code if the socket is
- * owned by userspace.  A struct sock returned from this function must be
- * released using l2tp_tunnel_sock_put once you're done with it.
- */
-static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
-{
-	int err = 0;
-	struct socket *sock = NULL;
-	struct sock *sk = NULL;
-
-	if (!tunnel)
-		goto out;
-
-	if (tunnel->fd >= 0) {
-		/* Socket is owned by userspace, who might be in the process
-		 * of closing it.  Look the socket up using the fd to ensure
-		 * consistency.
-		 */
-		sock = sockfd_lookup(tunnel->fd, &err);
-		if (sock)
-			sk = sock->sk;
-	} else {
-		/* Socket is owned by kernelspace */
-		sk = tunnel->sock;
-		sock_hold(sk);
-	}
-
-out:
-	return sk;
-}
-
-/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
-static void l2tp_tunnel_sock_put(struct sock *sk)
-{
-	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
-	if (tunnel) {
-		if (tunnel->fd >= 0) {
-			/* Socket is owned by userspace */
-			sockfd_put(sk->sk_socket);
-		}
-		sock_put(sk);
-	}
-	sock_put(sk);
-}
-
 /* Session hash list.
  * The session_id SHOULD be random according to RFC2661, but several
  * L2TP implementations (Cisco and Microsoft) use incrementing

@@ -193,6 +148,13 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
 	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
 }

+void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+{
+	sock_put(tunnel->sock);
+	/* the tunnel is freed in the socket destructor */
+}
+EXPORT_SYMBOL(l2tp_tunnel_free);
+
 /* Lookup a tunnel. A new reference is held on the returned tunnel. */
 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
 {

@@ -345,13 +307,11 @@ int l2tp_session_register(struct l2tp_session *session,
 		}

 		l2tp_tunnel_inc_refcount(tunnel);
-		sock_hold(tunnel->sock);
 		hlist_add_head_rcu(&session->global_hlist, g_head);

 		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
 	} else {
 		l2tp_tunnel_inc_refcount(tunnel);
-		sock_hold(tunnel->sock);
 	}

 	hlist_add_head(&session->hlist, head);

@@ -969,7 +929,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct l2tp_tunnel *tunnel;

-	tunnel = l2tp_sock_to_tunnel(sk);
+	tunnel = l2tp_tunnel(sk);
 	if (tunnel == NULL)
 		goto pass_up;

@@ -977,13 +937,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 		 tunnel->name, skb->len);

 	if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
-		goto pass_up_put;
+		goto pass_up;

-	sock_put(sk);
 	return 0;

-pass_up_put:
-	sock_put(sk);
 pass_up:
 	return 1;
 }

@@ -1207,14 +1164,12 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
 static void l2tp_tunnel_destruct(struct sock *sk)
 {
 	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
-	struct l2tp_net *pn;

 	if (tunnel == NULL)
 		goto end;

 	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);

-
 	/* Disable udp encapsulation */
 	switch (tunnel->encap) {
 	case L2TP_ENCAPTYPE_UDP:

@@ -1231,18 +1186,11 @@ static void l2tp_tunnel_destruct(struct sock *sk)
 	sk->sk_destruct = tunnel->old_sk_destruct;
 	sk->sk_user_data = NULL;

-	/* Remove the tunnel struct from the tunnel list */
-	pn = l2tp_pernet(tunnel->l2tp_net);
-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-	list_del_rcu(&tunnel->list);
-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-
-	tunnel->sock = NULL;
-	l2tp_tunnel_dec_refcount(tunnel);
-
 	/* Call the original destructor */
 	if (sk->sk_destruct)
 		(*sk->sk_destruct)(sk);
-
-	kfree_rcu(tunnel, rcu);
 end:
 	return;
 }

@@ -1303,49 +1251,43 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
 /* Tunnel socket destroy hook for UDP encapsulation */
 static void l2tp_udp_encap_destroy(struct sock *sk)
 {
-	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
-	if (tunnel) {
-		l2tp_tunnel_closeall(tunnel);
-		sock_put(sk);
-	}
+	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
+
+	if (tunnel)
+		l2tp_tunnel_delete(tunnel);
 }

 /* Workqueue tunnel deletion function */
 static void l2tp_tunnel_del_work(struct work_struct *work)
 {
-	struct l2tp_tunnel *tunnel = NULL;
-	struct socket *sock = NULL;
-	struct sock *sk = NULL;
-
-	tunnel = container_of(work, struct l2tp_tunnel, del_work);
+	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
+						  del_work);
+	struct sock *sk = tunnel->sock;
+	struct socket *sock = sk->sk_socket;
+	struct l2tp_net *pn;

 	l2tp_tunnel_closeall(tunnel);

-	sk = l2tp_tunnel_sock_lookup(tunnel);
-	if (!sk)
-		goto out;
-
-	sock = sk->sk_socket;
-
-	/* If the tunnel socket was created by userspace, then go through the
-	 * inet layer to shut the socket down, and let userspace close it.
-	 * Otherwise, if we created the socket directly within the kernel, use
+	/* If the tunnel socket was created within the kernel, use
 	 * the sk API to release it here.
-	 * In either case the tunnel resources are freed in the socket
-	 * destructor when the tunnel socket goes away.
 	 */
-	if (tunnel->fd >= 0) {
-		if (sock)
-			inet_shutdown(sock, 2);
-	} else {
+	if (tunnel->fd < 0) {
 		if (sock) {
 			kernel_sock_shutdown(sock, SHUT_RDWR);
 			sock_release(sock);
 		}
 	}

-	l2tp_tunnel_sock_put(sk);
-out:
+	/* Remove the tunnel struct from the tunnel list */
+	pn = l2tp_pernet(tunnel->l2tp_net);
+	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+	list_del_rcu(&tunnel->list);
+	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+
+	/* drop initial ref */
+	l2tp_tunnel_dec_refcount(tunnel);
+
+	/* drop workqueue ref */
 	l2tp_tunnel_dec_refcount(tunnel);
 }

@@ -1598,13 +1540,22 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 		sk->sk_user_data = tunnel;
 	}

+	/* Bump the reference count. The tunnel context is deleted
+	 * only when this drops to zero. A reference is also held on
+	 * the tunnel socket to ensure that it is not released while
+	 * the tunnel is extant. Must be done before sk_destruct is
+	 * set.
+	 */
+	refcount_set(&tunnel->ref_count, 1);
+	sock_hold(sk);
+	tunnel->sock = sk;
+	tunnel->fd = fd;
+
 	/* Hook on the tunnel socket destructor so that we can cleanup
 	 * if the tunnel socket goes away.
 	 */
 	tunnel->old_sk_destruct = sk->sk_destruct;
 	sk->sk_destruct = &l2tp_tunnel_destruct;
-	tunnel->sock = sk;
-	tunnel->fd = fd;
 	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");

 	sk->sk_allocation = GFP_ATOMIC;

@@ -1614,11 +1565,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 	/* Add tunnel to our list */
 	INIT_LIST_HEAD(&tunnel->list);

-	/* Bump the reference count. The tunnel context is deleted
-	 * only when this drops to zero. Must be done before list insertion
-	 */
-	refcount_set(&tunnel->ref_count, 1);
-
 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
 	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

@@ -1659,8 +1605,6 @@ void l2tp_session_free(struct l2tp_session *session)
 	if (tunnel) {
 		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-		sock_put(tunnel->sock);
-		session->tunnel = NULL;
 		l2tp_tunnel_dec_refcount(tunnel);
 	}

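The ownership model the l2tp rework (item 3 in the pull summary) moves to: the tunnel takes one long-lived reference on its socket at create time, the workqueue deletion path drops the tunnel refs, and when the tunnel refcount reaches zero l2tp_tunnel_free() releases the socket reference so the socket destructor performs the actual free. A generic sketch of the idiom, with hypothetical names ("obj", "obj_put"), not the l2tp code itself:

/* Sketch: a refcounted object pins its socket; the socket destructor
 * is the single place the object's memory is released.
 */
struct obj {
	refcount_t	ref_count;
	struct sock	*sock;		/* holds one sock_hold() reference */
};

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref_count))
		sock_put(o->sock);	/* freeing happens in sk_destruct */
}
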
@@ -214,27 +214,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
 	return &session->priv[0];
 }

-static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
-{
-	struct l2tp_tunnel *tunnel;
-
-	if (sk == NULL)
-		return NULL;
-
-	sock_hold(sk);
-	tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
-	if (tunnel == NULL) {
-		sock_put(sk);
-		goto out;
-	}
-
-	BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
-out:
-	return tunnel;
-}
-
 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
+void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);

 struct l2tp_session *l2tp_session_get(const struct net *net,
 				      struct l2tp_tunnel *tunnel,

@@ -283,7 +264,7 @@ static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
 static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
 {
 	if (refcount_dec_and_test(&tunnel->ref_count))
-		kfree_rcu(tunnel, rcu);
+		l2tp_tunnel_free(tunnel);
 }

 /* Session reference counts. Incremented when code obtains a reference

@@ -234,17 +234,13 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
 static void l2tp_ip_destroy_sock(struct sock *sk)
 {
 	struct sk_buff *skb;
-	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+	struct l2tp_tunnel *tunnel = sk->sk_user_data;

 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
 		kfree_skb(skb);

-	if (tunnel) {
-		l2tp_tunnel_closeall(tunnel);
-		sock_put(sk);
-	}
-
-	sk_refcnt_debug_dec(sk);
+	if (tunnel)
+		l2tp_tunnel_delete(tunnel);
 }

 static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)

@@ -248,16 +248,14 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
 static void l2tp_ip6_destroy_sock(struct sock *sk)
 {
-	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+	struct l2tp_tunnel *tunnel = sk->sk_user_data;

 	lock_sock(sk);
 	ip6_flush_pending_frames(sk);
 	release_sock(sk);

-	if (tunnel) {
-		l2tp_tunnel_closeall(tunnel);
-		sock_put(sk);
-	}
+	if (tunnel)
+		l2tp_tunnel_delete(tunnel);

 	inet6_destroy_sock(sk);
 }

@@ -416,20 +416,28 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
  * Session (and tunnel control) socket create/destroy.
  *****************************************************************************/

+static void pppol2tp_put_sk(struct rcu_head *head)
+{
+	struct pppol2tp_session *ps;
+
+	ps = container_of(head, typeof(*ps), rcu);
+	sock_put(ps->__sk);
+}
+
 /* Called by l2tp_core when a session socket is being closed.
  */
 static void pppol2tp_session_close(struct l2tp_session *session)
 {
-	struct sock *sk;
-
-	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+	struct pppol2tp_session *ps;

-	sk = pppol2tp_session_get_sock(session);
-	if (sk) {
-		if (sk->sk_socket)
-			inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
-		sock_put(sk);
-	}
+	ps = l2tp_session_priv(session);
+	mutex_lock(&ps->sk_lock);
+	ps->__sk = rcu_dereference_protected(ps->sk,
+					     lockdep_is_held(&ps->sk_lock));
+	RCU_INIT_POINTER(ps->sk, NULL);
+	if (ps->__sk)
+		call_rcu(&ps->rcu, pppol2tp_put_sk);
+	mutex_unlock(&ps->sk_lock);
 }

 /* Really kill the session socket. (Called from sock_put() if

@@ -449,14 +457,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
 	}
 }

-static void pppol2tp_put_sk(struct rcu_head *head)
-{
-	struct pppol2tp_session *ps;
-
-	ps = container_of(head, typeof(*ps), rcu);
-	sock_put(ps->__sk);
-}
-
 /* Called when the PPPoX socket (session) is closed.
  */
 static int pppol2tp_release(struct socket *sock)

@@ -480,26 +480,17 @@ static int pppol2tp_release(struct socket *sock)
 	sock_orphan(sk);
 	sock->sk = NULL;

+	/* If the socket is associated with a session,
+	 * l2tp_session_delete will call pppol2tp_session_close which
+	 * will drop the session's ref on the socket.
+	 */
 	session = pppol2tp_sock_to_session(sk);
-
-	if (session != NULL) {
-		struct pppol2tp_session *ps;
-
+	if (session) {
 		l2tp_session_delete(session);
-
-		ps = l2tp_session_priv(session);
-		mutex_lock(&ps->sk_lock);
-		ps->__sk = rcu_dereference_protected(ps->sk,
-						     lockdep_is_held(&ps->sk_lock));
-		RCU_INIT_POINTER(ps->sk, NULL);
-		mutex_unlock(&ps->sk_lock);
-		call_rcu(&ps->rcu, pppol2tp_put_sk);
-
-		/* Rely on the sock_put() call at the end of the function for
-		 * dropping the reference held by pppol2tp_sock_to_session().
-		 * The last reference will be dropped by pppol2tp_put_sk().
-		 */
+		/* drop the ref obtained by pppol2tp_sock_to_session */
+		sock_put(sk);
 	}
+
 	release_sock(sk);

 	/* This will delete the session context via

@@ -796,6 +787,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 out_no_ppp:
 	/* This is how we get the session context from the socket. */
+	sock_hold(sk);
 	sk->sk_user_data = session;
 	rcu_assign_pointer(ps->sk, sk);
 	mutex_unlock(&ps->sk_lock);

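The key move above is that the final sock_put() on the session's socket is deferred through call_rcu(), so lockless readers that found the socket via the RCU-protected ps->sk pointer cannot have it freed out from under them. A generic sketch of that pattern, with hypothetical names ("holder", "holder_put_sk"):

/* Sketch: defer the last sock_put() past an RCU grace period so RCU
 * readers holding the old pointer stay safe until they drop out of
 * their read-side critical sections.
 */
struct holder {
	struct sock	*sk;
	struct rcu_head	rcu;
};

static void holder_put_sk(struct rcu_head *head)
{
	struct holder *h = container_of(head, struct holder, rcu);

	sock_put(h->sk);
}

/* writer side: instead of sock_put(h->sk) directly, do
 *	call_rcu(&h->rcu, holder_put_sk);
 */
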
@@ -3921,7 +3921,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
 	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
 					      IEEE80211_FCTL_TODS)) !=
 	    fast_rx->expected_ds_bits)
-		goto drop;
+		return false;

 	/* assign the key to drop unencrypted frames (later)
	 * and strip the IV/MIC if necessary

@@ -3574,6 +3574,14 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 	if (!IS_ERR_OR_NULL(sta)) {
 		struct ieee80211_fast_tx *fast_tx;

+		/* We need a bit of data queued to build aggregates properly, so
+		 * instruct the TCP stack to allow more than a single ms of data
+		 * to be queued in the stack. The value is a bit-shift of 1
+		 * second, so 8 is ~4ms of queued data. Only affects local TCP
+		 * sockets.
+		 */
+		sk_pacing_shift_update(skb->sk, 8);
+
 		fast_tx = rcu_dereference(sta->fast_tx);

 		if (fast_tx &&

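Working through the arithmetic behind item 1 in the pull summary: TCP small queues allow roughly sk_pacing_rate >> sk_pacing_shift bytes to sit below the socket, i.e. 2^-shift seconds of data. The default shift of 10 is ~1 ms, too little for 802.11 aggregation; shift 8 is 1/256 s, about 3.9 ms. A sketch of the budget calculation (function name hypothetical):

/* Sketch: bytes TSQ lets a socket queue in lower layers.
 * e.g. 500 Mbit/s = 62,500,000 B/s; 62500000 >> 8 ~= 244 KB (~3.9 ms).
 */
static unsigned long tsq_budget_bytes(unsigned long pacing_rate_bps,
				      int pacing_shift)
{
	return pacing_rate_bps >> pacing_shift;
}
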
@@ -122,7 +122,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->len <= mtu)
 		return false;

-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;

 	return true;

@@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
 		buf_len = strlen(buf);

 		ct = nf_ct_get(skb, &ctinfo);
-		if (ct && (ct->status & IPS_NAT_MASK)) {
+		if (ct) {
 			bool mangled;

 			/* If mangling fails this function will return 0

@@ -5037,9 +5037,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	const struct nf_flowtable_type *type;
+	struct nft_flowtable *flowtable, *ft;
 	u8 genmask = nft_genmask_next(net);
 	int family = nfmsg->nfgen_family;
-	struct nft_flowtable *flowtable;
 	struct nft_table *table;
 	struct nft_ctx ctx;
 	int err, i, k;

@@ -5099,6 +5099,22 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 		goto err3;

 	for (i = 0; i < flowtable->ops_len; i++) {
+		if (!flowtable->ops[i].dev)
+			continue;
+
+		list_for_each_entry(ft, &table->flowtables, list) {
+			for (k = 0; k < ft->ops_len; k++) {
+				if (!ft->ops[k].dev)
+					continue;
+
+				if (flowtable->ops[i].dev == ft->ops[k].dev &&
+				    flowtable->ops[i].pf == ft->ops[k].pf) {
+					err = -EBUSY;
+					goto err4;
+				}
+			}
+		}
+
 		err = nf_register_net_hook(net, &flowtable->ops[i]);
 		if (err < 0)
 			goto err4;

@@ -5120,7 +5136,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 	i = flowtable->ops_len;
 err4:
 	for (k = i - 1; k >= 0; k--)
-		nf_unregister_net_hook(net, &flowtable->ops[i]);
+		nf_unregister_net_hook(net, &flowtable->ops[k]);

 	kfree(flowtable->ops);
 err3:

@@ -5145,6 +5161,11 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
 	struct nft_table *table;
 	struct nft_ctx ctx;

+	if (!nla[NFTA_FLOWTABLE_TABLE] ||
+	    (!nla[NFTA_FLOWTABLE_NAME] &&
+	     !nla[NFTA_FLOWTABLE_HANDLE]))
+		return -EINVAL;
+
 	table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
 				       family, genmask);
 	if (IS_ERR(table))

@@ -114,5 +114,6 @@ static struct rpmsg_driver qcom_smd_qrtr_driver = {

 module_rpmsg_driver(qcom_smd_qrtr_driver);

+MODULE_ALIAS("rpmsg:IPCRTR");
 MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
 MODULE_LICENSE("GPL v2");

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Oracle.  All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU

@@ -142,12 +142,20 @@ int rds_tcp_accept_one(struct socket *sock)
 	if (ret)
 		goto out;

 	new_sock->type = sock->type;
-	new_sock->ops = sock->ops;
 	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
 	if (ret < 0)
 		goto out;

+	/* sock_create_lite() does not get a hold on the owner module so we
+	 * need to do it here. Note that sock_release() uses sock->ops to
+	 * determine if it needs to decrement the reference count. So set
+	 * sock->ops after calling accept() in case that fails. And there's
+	 * no need to do try_module_get() as the listener should have a hold
+	 * already.
+	 */
+	new_sock->ops = sock->ops;
+	__module_get(new_sock->ops->owner);
+
 	ret = rds_tcp_keepalive(new_sock);
 	if (ret < 0)
 		goto out;

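The ordering here matters because sock_release() only does a module_put() when sock->ops is non-NULL. A sketch of the failure path being closed (item 9 in the pull summary), in comment form over hypothetical local names:

/* Sketch: why ops is assigned only after accept() succeeds.
 *
 *   new_sock->ops = sock->ops;          (too early)
 *   ret = sock->ops->accept(...);       fails
 *   sock_release(new_sock);             sees ops set, does module_put()
 *                                       on a ref never taken: underflow
 *
 * Setting ops after a successful accept() and pairing it with
 * __module_get() keeps the release path balanced.
 */
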
@@ -188,7 +188,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	int ret;

 	if (qdisc_pkt_len(skb) > q->max_size) {
-		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
+		if (skb_is_gso(skb) &&
+		    skb_gso_validate_mac_len(skb, q->max_size))
 			return tbf_segment(skb, sch, to_free);
 		return qdisc_drop(skb, sch, to_free);
 	}

@@ -1406,8 +1406,10 @@ static int smc_create(struct net *net, struct socket *sock, int protocol,
 	smc->use_fallback = false; /* assume rdma capability first */
 	rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
 			      IPPROTO_TCP, &smc->clcsock);
-	if (rc)
+	if (rc) {
 		sk_common_release(sk);
+		goto out;
+	}
 	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
 	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

@@ -269,7 +269,7 @@ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
 	if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
 		return; /* short message */
-	if (cdc->len != sizeof(*cdc))
+	if (cdc->len != SMC_WR_TX_SIZE)
 		return; /* invalid message */
 	smc_cdc_msg_recv(cdc, link, wc->wr_id);
 }

@@ -177,6 +177,7 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
 	lnk = &lgr->lnk[SMC_SINGLE_LINK];
 	/* initialize link */
+	lnk->link_id = SMC_SINGLE_LINK;
 	lnk->smcibdev = smcibdev;
 	lnk->ibport = ibport;
 	lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;

@@ -465,7 +466,7 @@ int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr,
 		rc = smc_link_determine_gid(conn->lgr);
 	}
 	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
-	conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg);
+	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&conn->acurs_lock);
 #endif

@@ -92,7 +92,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[],
 	memcpy(confllc->sender_mac, mac, ETH_ALEN);
 	memcpy(confllc->sender_gid, gid, SMC_GID_SIZE);
 	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
-	/* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */
+	confllc->link_num = link->link_id;
 	memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
 	confllc->max_links = SMC_LINKS_PER_LGR_MAX;
 	/* send llc message */

@@ -189,6 +189,7 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid,
 	grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
 	grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
 	grp->open = group_is_open;
+	*grp->open = false;
 	filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
 	if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
 				    filter, &grp->subid))

@@ -473,6 +473,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 	sk->sk_write_space = tipc_write_space;
 	sk->sk_destruct = tipc_sock_destruct;
 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+	tsk->group_is_open = true;
 	atomic_set(&tsk->dupl_rcvcnt, 0);

 	/* Start out with safe limits until we receive an advertised window */

@@ -45,17 +45,27 @@ MODULE_AUTHOR("Mellanox Technologies");
 MODULE_DESCRIPTION("Transport Layer Security Support");
 MODULE_LICENSE("Dual BSD/GPL");

+enum {
+	TLSV4,
+	TLSV6,
+	TLS_NUM_PROTS,
+};
+
 enum {
 	TLS_BASE_TX,
 	TLS_SW_TX,
 	TLS_NUM_CONFIG,
 };

-static struct proto tls_prots[TLS_NUM_CONFIG];
+static struct proto *saved_tcpv6_prot;
+static DEFINE_MUTEX(tcpv6_prot_mutex);
+
+static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG];

 static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
 {
-	sk->sk_prot = &tls_prots[ctx->tx_conf];
+	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+
+	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf];
 }

 int wait_on_pending_writer(struct sock *sk, long *timeo)

@@ -453,8 +463,21 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
 	return do_tls_setsockopt(sk, optname, optval, optlen);
 }

+static void build_protos(struct proto *prot, struct proto *base)
+{
+	prot[TLS_BASE_TX] = *base;
+	prot[TLS_BASE_TX].setsockopt	= tls_setsockopt;
+	prot[TLS_BASE_TX].getsockopt	= tls_getsockopt;
+	prot[TLS_BASE_TX].close		= tls_sk_proto_close;
+
+	prot[TLS_SW_TX] = prot[TLS_BASE_TX];
+	prot[TLS_SW_TX].sendmsg		= tls_sw_sendmsg;
+	prot[TLS_SW_TX].sendpage	= tls_sw_sendpage;
+}
+
 static int tls_init(struct sock *sk)
 {
+	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tls_context *ctx;
 	int rc = 0;

@@ -479,6 +502,17 @@ static int tls_init(struct sock *sk)
 	ctx->getsockopt = sk->sk_prot->getsockopt;
 	ctx->sk_proto_close = sk->sk_prot->close;

+	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
+	if (ip_ver == TLSV6 &&
+	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+		mutex_lock(&tcpv6_prot_mutex);
+		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+			build_protos(tls_prots[TLSV6], sk->sk_prot);
+			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+		}
+		mutex_unlock(&tcpv6_prot_mutex);
+	}
+
 	ctx->tx_conf = TLS_BASE_TX;
 	update_sk_prot(sk, ctx);
 out:

@@ -493,21 +527,9 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
 	.init			= tls_init,
 };

-static void build_protos(struct proto *prot, struct proto *base)
-{
-	prot[TLS_BASE_TX] = *base;
-	prot[TLS_BASE_TX].setsockopt	= tls_setsockopt;
-	prot[TLS_BASE_TX].getsockopt	= tls_getsockopt;
-	prot[TLS_BASE_TX].close		= tls_sk_proto_close;
-
-	prot[TLS_SW_TX] = prot[TLS_BASE_TX];
-	prot[TLS_SW_TX].sendmsg		= tls_sw_sendmsg;
-	prot[TLS_SW_TX].sendpage	= tls_sw_sendpage;
-}
-
 static int __init tls_register(void)
 {
-	build_protos(tls_prots, &tcp_prot);
+	build_protos(tls_prots[TLSV4], &tcp_prot);

 	tcp_register_ulp(&tcp_tls_ulp_ops);

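The IPv6 proto table cannot be built at module init the way the IPv4 one is, because tcpv6_prot lives in a module whose address may change across load/unload; the patch therefore builds it lazily with double-checked locking, pairing the reader's smp_load_acquire() with the writer's smp_store_release(). A generic sketch of that pattern, with hypothetical names ("saved_base", "maybe_rebuild"):

/* Sketch: lazily (re)build a derived table keyed on a base pointer
 * that can change. Fast path is lock-free; the mutex serializes the
 * rare rebuild.
 */
static struct proto *saved_base;
static DEFINE_MUTEX(build_mutex);

static void maybe_rebuild(struct proto *base, struct proto *derived)
{
	if (likely(base == smp_load_acquire(&saved_base)))
		return;				/* already built for this base */

	mutex_lock(&build_mutex);
	if (base != saved_base) {
		build_protos(derived, base);	/* as defined above */
		smp_store_release(&saved_base, base);
	}
	mutex_unlock(&build_mutex);
}
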
@@ -34,9 +34,10 @@ config CFG80211
	  When built as a module it will be called cfg80211.

+if CFG80211
+
 config NL80211_TESTMODE
	bool "nl80211 testmode command"
-	depends on CFG80211
	help
	  The nl80211 testmode command helps implementing things like
	  factory calibration or validation tools for wireless chips.

@@ -51,7 +52,6 @@ config NL80211_TESTMODE
 config CFG80211_DEVELOPER_WARNINGS
	bool "enable developer warnings"
-	depends on CFG80211
	default n
	help
	  This option enables some additional warnings that help

@@ -68,7 +68,7 @@ config CFG80211_DEVELOPER_WARNINGS
 config CFG80211_CERTIFICATION_ONUS
	bool "cfg80211 certification onus"
-	depends on CFG80211 && EXPERT
+	depends on EXPERT
	default n
	---help---
	  You should disable this option unless you are both capable

@@ -159,7 +159,6 @@ config CFG80211_REG_RELAX_NO_IR
 config CFG80211_DEFAULT_PS
	bool "enable powersave by default"
-	depends on CFG80211
	default y
	help
	  This option enables powersave mode by default.

@@ -170,7 +169,6 @@ config CFG80211_DEFAULT_PS
 config CFG80211_DEBUGFS
	bool "cfg80211 DebugFS entries"
-	depends on CFG80211
	depends on DEBUG_FS
	---help---
	  You can enable this if you want debugfs entries for cfg80211.

@@ -180,7 +178,6 @@ config CFG80211_DEBUGFS
 config CFG80211_CRDA_SUPPORT
	bool "support CRDA" if EXPERT
	default y
-	depends on CFG80211
	help
	  You should enable this option unless you know for sure you have no
	  need for it, for example when using internal regdb (above) or the

@@ -190,7 +187,6 @@ config CFG80211_CRDA_SUPPORT
 config CFG80211_WEXT
	bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT
-	depends on CFG80211
	select WEXT_CORE
	default y if CFG80211_WEXT_EXPORT
	help

@@ -199,11 +195,12 @@ config CFG80211_WEXT
 config CFG80211_WEXT_EXPORT
	bool
-	depends on CFG80211
	help
	  Drivers should select this option if they require cfg80211's
	  wext compatibility symbols to be exported.

+endif # CFG80211
+
 config LIB80211
	tristate
	default n

@@ -217,7 +217,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 		if (skb->len <= mtu)
 			goto ok;

-		if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 			goto ok;
 	}

@@ -11163,6 +11163,64 @@ static struct bpf_test tests[] = {
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
+	{
+		"xadd/w check unaligned stack",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "misaligned stack access off",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"xadd/w check unaligned map",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = REJECT,
+		.errstr = "misaligned value access off",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"xadd/w check unaligned pkt",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct xdp_md, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct xdp_md, data_end)),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 99),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "BPF_XADD stores into R2 packet",
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
 };

 static int probe_filter_length(const struct bpf_insn *fp)

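These tests pin down the verifier behavior behind item 5 in the pull summary: BPF_XADD (an atomic read-modify-write) must operate on a naturally aligned address, so a BPF_W (32-bit) xadd at stack offset -7, at map-value offset 3, or at packet offsets 1 and 2 has to be rejected. A short C analogy of the constraint being enforced, as I understand it:

/* Sketch: a 32-bit atomic RMW requires 4-byte alignment; on several
 * architectures a JIT may target, a misaligned atomic is undefined
 * behavior or a trap, so the verifier must refuse it up front.
 */
static void add_one(int *p)
{
	/* p must be 4-byte aligned; ((char *)p + 3) would not be valid */
	__sync_fetch_and_add(p, 1);
}
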
@@ -315,7 +315,7 @@
        "cmdUnderTest": "$TC actions ls action skbmod",
        "expExitCode": "0",
        "verifyCmd": "$TC actions get action skbmod index 4",
-        "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x0031",
+        "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x31",
        "matchCount": "1",
        "teardown": [
            "$TC actions flush action skbmod"