mirror of https://gitee.com/openkylin/linux.git
Merge tag 'net-5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Networking fixes for 5.11-rc5, including fixes from bpf, wireless,
  and can trees.

  Current release - regressions:

   - nfc: nci: fix the wrong NCI_CORE_INIT parameters

  Current release - new code bugs:

   - bpf: allow empty module BTFs

  Previous releases - regressions:

   - bpf: fix signed_{sub,add32}_overflows type handling

   - tcp: do not mess with cloned skbs in tcp_add_backlog()

   - bpf: prevent double bpf_prog_put call from bpf_tracing_prog_attach

   - bpf: don't leak memory in bpf getsockopt when optlen == 0

   - tcp: fix potential use-after-free due to double kfree()

   - mac80211: fix encryption issues with WEP

   - devlink: use right genl user_ptr when handling port param get/set

   - ipv6: set multicast flag on the multicast route

   - tcp: fix TCP_USER_TIMEOUT with zero window

  Previous releases - always broken:

   - bpf: local storage helpers should check nullness of owner ptr passed

   - mac80211: fix incorrect strlen of .write in debugfs

   - cls_flower: call nla_ok() before nla_next()

   - skbuff: back tiny skbs with kmalloc() in __netdev_alloc_skb() too"

* tag 'net-5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (52 commits)
  net: systemport: free dev before on error path
  net: usb: cdc_ncm: don't spew notifications
  net: mscc: ocelot: Fix multicast to the CPU port
  tcp: Fix potential use-after-free due to double kfree()
  bpf: Fix signed_{sub,add32}_overflows type handling
  can: peak_usb: fix use after free bugs
  can: vxcan: vxcan_xmit: fix use after free bug
  can: dev: can_restart: fix use after free bug
  tcp: fix TCP socket rehash stats mis-accounting
  net: dsa: b53: fix an off by one in checking "vlan->vid"
  tcp: do not mess with cloned skbs in tcp_add_backlog()
  selftests: net: fib_tests: remove duplicate log test
  net: nfc: nci: fix the wrong NCI_CORE_INIT parameters
  sh_eth: Fix power down vs. is_opened flag ordering
  net: Disable NETIF_F_HW_TLS_RX when RXCSUM is disabled
  netfilter: rpfilter: mask ecn bits before fib lookup
  udp: mask TOS bits in udp_v4_early_demux()
  xsk: Clear pool even for inactive queues
  bpf: Fix helper bpf_map_peek_elem_proto pointing to wrong callback
  sh_eth: Make PHY access aware of Runtime PM to fix reboot crash
  ...

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmAIa+UACgkQMUZtbf5S
IruZTQ/+O263ZyI0C5S1uCbHPCsAyjZyxECWDNfQ3tRzTfvldoRRP4YbC1ekSoXu
8Y9GKDDLMI2pYkNlCqfMhrFaop8sudosntOZDSeRm/2TkkQFnkM/bxAlz++7Rnwx
vHu1Xo2t2bKJxooSw8gLJ5iZNTbkw/M5iA3qR9kP+BG1yDP7By4P/Y4ziFphffad
gPlfLQaU8nRVuDBYYrGIX0GoMg05IH1zt2/MxvN4ReXuex/9tq2TrU8jxHiwT2ja
K1DHR+g2VVZf55TWrL9Yw8V5Rr+F7bxf6i+yer9hWWhENXgoTv6QkndAnTFOcoat
VQh44GzoNoL1dAHD8kyUOOxJCyjItJJe58Evcwjnls4o+5BC2aDNQADwrSyz3sHe
l9iNMSMEylymu7Xu+cJw2kjOq/BK6TdjaGSxwm1M2ErPehf36eJuc4FkaJz3RO55
nkYMfm0+5rYWSsR5CTTJp8r2urCAT4SSx1iLoZknUXE6qa5AcMSNhIjGbw6pUp4q
RDBtAKqiV0l37vdUag4Z+QgjPA0cH9E4aMQKYmD9dop20Zuzp4ug38qR32aEFC6q
Qfb0VBMKgwu6OWjuWARbwYktVQNcoelKiGnsGnORJ5S9cyc1N4HeKEnb5Hw8ky5q
4FBpNMfx3Ief14iNkh65KrzA+uyZBjqEG+joTSzn+9R7Lof60QA=
=KyY7
-----END PGP SIGNATURE-----
commit 75439bc439
@@ -55,6 +55,8 @@ Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
 Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
@@ -534,3 +534,6 @@ offload. Hence, TLS TX device feature flag requires TX csum offload being set.
 Disabling the latter implies clearing the former. Disabling TX checksum offload
 should not affect old connections, and drivers should make sure checksum
 calculation does not break for them.
+Similarly, device-offloaded TLS decryption implies doing RXCSUM. If the user
+does not want to enable RX csum offload, TLS RX device feature is disabled
+as well.
@@ -3334,7 +3334,7 @@ F: arch/riscv/net/
 X:	arch/riscv/net/bpf_jit_comp64.c

 BPF JIT for RISC-V (64-bit)
-M:	Björn Töpel <bjorn.topel@gmail.com>
+M:	Björn Töpel <bjorn@kernel.org>
 L:	netdev@vger.kernel.org
 L:	bpf@vger.kernel.org
 S:	Maintained
@@ -19416,7 +19416,7 @@ F: drivers/net/ethernet/*/*/*xdp*
 K:	(?:\b|_)xdp(?:\b|_)

 XDP SOCKETS (AF_XDP)
-M:	Björn Töpel <bjorn.topel@intel.com>
+M:	Björn Töpel <bjorn@kernel.org>
 M:	Magnus Karlsson <magnus.karlsson@intel.com>
 R:	Jonathan Lemon <jonathan.lemon@gmail.com>
 L:	netdev@vger.kernel.org
@@ -592,11 +592,11 @@ static void can_restart(struct net_device *dev)

 	cf->can_id |= CAN_ERR_RESTARTED;

-	netif_rx_ni(skb);
-
 	stats->rx_packets++;
 	stats->rx_bytes += cf->len;

+	netif_rx_ni(skb);
+
 restart:
 	netdev_dbg(dev, "restarted\n");
 	priv->can_stats.restarts++;
@@ -514,11 +514,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
 	else
 		memcpy(cfd->data, rm->d, cfd->len);

-	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += cfd->len;

+	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
 	return 0;
 }
@@ -580,11 +580,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
 	if (!skb)
 		return -ENOMEM;

-	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += cf->len;

+	peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
 	return 0;
 }
@@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct net_device *peer;
 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 	struct net_device_stats *peerstats, *srcstats = &dev->stats;
+	u8 len;

 	if (can_dropped_invalid_skb(dev, skb))
 		return NETDEV_TX_OK;
@@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb->dev        = peer;
 	skb->ip_summed  = CHECKSUM_UNNECESSARY;

+	len = cfd->len;
 	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
 		srcstats->tx_packets++;
-		srcstats->tx_bytes += cfd->len;
+		srcstats->tx_bytes += len;
 		peerstats = &peer->stats;
 		peerstats->rx_packets++;
-		peerstats->rx_bytes += cfd->len;
+		peerstats->rx_bytes += len;
 	}

 out_unlock:
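All three CAN fixes above share one pattern: netif_rx_ni() hands the skb to the
stack, which may free it, so any later read through the skb (cf->len via
skb->data) is a use after free. Either update the counters first or snapshot the
length before handing off ownership. A minimal userspace sketch of the idiom;
consume() is a hypothetical stand-in for netif_rx_ni():

#include <stdio.h>
#include <stdlib.h>

struct frame { unsigned char len; unsigned char data[8]; };

/* Stands in for netif_rx_ni(): takes ownership and may free the buffer. */
static void consume(struct frame *f)
{
	free(f);
}

int main(void)
{
	struct frame *f = calloc(1, sizeof(*f));
	unsigned long rx_bytes = 0;

	if (!f)
		return 1;
	f->len = 8;

	unsigned char len = f->len;	/* snapshot what you still need... */
	consume(f);			/* ...then hand off ownership. */
	rx_bytes += len;		/* NOT f->len: f is gone. */

	printf("rx_bytes=%lu\n", rx_bytes);
	return 0;
}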
@@ -1404,7 +1404,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
 	    !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
 		return -EINVAL;

-	if (vlan->vid_end > dev->num_vlans)
+	if (vlan->vid_end >= dev->num_vlans)
 		return -ERANGE;

 	b53_enable_vlan(dev, true, ds->vlan_filtering);
@@ -351,6 +351,10 @@ int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
 		if (err)
 			return err;

+		err = mv88e6185_g1_stu_data_read(chip, entry);
+		if (err)
+			return err;
+
 		/* VTU DBNum[3:0] are located in VTU Operation 3:0
 		 * VTU DBNum[5:4] are located in VTU Operation 9:8
 		 */
@@ -2503,8 +2503,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 	priv = netdev_priv(dev);

 	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
-	if (IS_ERR(priv->clk))
-		return PTR_ERR(priv->clk);
+	if (IS_ERR(priv->clk)) {
+		ret = PTR_ERR(priv->clk);
+		goto err_free_netdev;
+	}

 	/* Allocate number of TX rings */
 	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
@@ -469,6 +469,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
 	int pf = rvu_get_pf(req->hdr.pcifunc);
 	u8 cgx_id, lmac_id;

+	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+		return -EPERM;
+
 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

 	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -485,6 +488,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
 	int rc = 0, i;
 	u64 cfg;

+	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+		return -EPERM;
+
 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

 	rsp->hdr.rc = rc;
@@ -60,14 +60,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
 		      const unsigned char mac[ETH_ALEN],
 		      unsigned int vid, enum macaccess_entry_type type)
 {
+	u32 cmd = ANA_TABLES_MACACCESS_VALID |
+		  ANA_TABLES_MACACCESS_DEST_IDX(port) |
+		  ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+		  ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+	unsigned int mc_ports;
+
+	/* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+	if (type == ENTRYTYPE_MACv4)
+		mc_ports = (mac[1] << 8) | mac[2];
+	else if (type == ENTRYTYPE_MACv6)
+		mc_ports = (mac[0] << 8) | mac[1];
+	else
+		mc_ports = 0;
+
+	if (mc_ports & BIT(ocelot->num_phys_ports))
+		cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
 	ocelot_mact_select(ocelot, mac, vid);

 	/* Issue a write command */
-	ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
-			     ANA_TABLES_MACACCESS_DEST_IDX(port) |
-			     ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
-			     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
-			     ANA_TABLES_MACACCESS);
+	ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);

 	return ocelot_mact_wait_for_completion(ocelot);
 }
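The patch above reads the destination port mask straight out of the multicast
MAC address bytes (mac[1]:mac[2] for MACv4 entries, per the diff) and copies the
frame to the CPU when the CPU port's bit is set. A small sketch of that bit
test; the port count is an assumption for illustration, not a value from the
driver:

#include <stdint.h>
#include <stdio.h>

#define NUM_PHYS_PORTS 4	/* assumed: bit 4 would be the CPU port */

int main(void)
{
	/* MACv4 multicast entry: the port mask lives in mac[1]:mac[2] */
	uint8_t mac[6] = { 0x01, 0x00, 0x1e, 0x00, 0x00, 0x01 };
	unsigned int mc_ports = (mac[1] << 8) | mac[2];

	if (mc_ports & (1u << NUM_PHYS_PORTS))
		printf("copy to CPU port (mask=0x%x)\n", mc_ports);
	else
		printf("front ports only (mask=0x%x)\n", mc_ports);
	return 0;
}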
@@ -1042,10 +1042,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	int ret = 0;

-	if (!ocelot_netdevice_dev_check(dev))
-		return 0;
-
 	if (event == NETDEV_PRECHANGEUPPER &&
+	    ocelot_netdevice_dev_check(dev) &&
 	    netif_is_lag_master(info->upper_dev)) {
 		struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
 		struct netlink_ext_ack *extack;
@@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev)
 	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
 	sh_eth_ring_free(ndev);

-	pm_runtime_put_sync(&mdp->pdev->dev);
-
 	mdp->is_opened = 0;

+	pm_runtime_put(&mdp->pdev->dev);
+
 	return 0;
 }
@@ -3034,6 +3034,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
 	return 0;
 }

+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+	int res;
+
+	pm_runtime_get_sync(bus->parent);
+	res = mdiobb_read(bus, phy, reg);
+	pm_runtime_put(bus->parent);
+
+	return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+	int res;
+
+	pm_runtime_get_sync(bus->parent);
+	res = mdiobb_write(bus, phy, reg, val);
+	pm_runtime_put(bus->parent);
+
+	return res;
+}
+
 /* MDIO bus init function */
 static int sh_mdio_init(struct sh_eth_private *mdp,
 			struct sh_eth_plat_data *pd)
@@ -3058,6 +3080,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
 	if (!mdp->mii_bus)
 		return -ENOMEM;

+	/* Wrap accessors with Runtime PM-aware ops */
+	mdp->mii_bus->read = sh_mdiobb_read;
+	mdp->mii_bus->write = sh_mdiobb_write;
+
 	/* Hook up MII support for ethtool */
 	mdp->mii_bus->name = "sh_mii";
 	mdp->mii_bus->parent = dev;
@@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
 	return dev_addr;
 }

-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
 {
 	struct mdiobb_ctrl *ctrl = bus->priv;
 	int ret, i;
@@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
 	mdiobb_get_bit(ctrl);
 	return ret;
 }
+EXPORT_SYMBOL(mdiobb_read);

-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
 {
 	struct mdiobb_ctrl *ctrl = bus->priv;

@@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
 	mdiobb_get_bit(ctrl);
 	return 0;
 }
+EXPORT_SYMBOL(mdiobb_write);

 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
 {
@@ -1827,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
 	uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
 	uint32_t tx_speed = le32_to_cpu(data->ULBitRate);

+	/* if the speed hasn't changed, don't report it.
+	 * RTL8156 shipped before 2021 sends notification about every 32ms.
+	 */
+	if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+		return;
+
+	dev->rx_speed = rx_speed;
+	dev->tx_speed = tx_speed;
+
 	/*
 	 * Currently the USB-NET API does not support reporting the actual
 	 * device speed. Do print it instead.
@@ -1867,6 +1876,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 		 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
 		 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
 		 */
-		usbnet_link_change(dev, !!event->wValue, 0);
+		if (netif_carrier_ok(dev->net) != !!event->wValue)
+			usbnet_link_change(dev, !!event->wValue, 0);
 		break;
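The cdc_ncm change is plain change detection: cache the last reported state and
return early when a notification carries no new information, so a chatty device
cannot flood the log. A hedged standalone sketch of the same pattern:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct link_state {
	uint32_t rx_speed;
	uint32_t tx_speed;
};

/* Returns true only when the state actually changed. */
static bool speed_changed(struct link_state *st, uint32_t rx, uint32_t tx)
{
	if (st->rx_speed == rx && st->tx_speed == tx)
		return false;	/* duplicate notification: suppress */
	st->rx_speed = rx;
	st->tx_speed = tx;
	return true;
}

int main(void)
{
	struct link_state st = { 0, 0 };

	printf("%d\n", speed_changed(&st, 1000, 1000));	/* 1: report */
	printf("%d\n", speed_changed(&st, 1000, 1000));	/* 0: suppress */
	return 0;
}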
@@ -35,6 +35,9 @@ struct mdiobb_ctrl {
 	const struct mdiobb_ops *ops;
 };

+int mdiobb_read(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
 /* The returned bus is not yet registered with the phy layer. */
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
@@ -81,6 +81,8 @@ struct usbnet {
 #	define EVENT_LINK_CHANGE	11
 #	define EVENT_SET_RX_MODE	12
 #	define EVENT_NO_IP_ALIGN	13
+	u32			rx_speed;	/* in bps - NOT Mbps */
+	u32			tx_speed;	/* in bps - NOT Mbps */
 };

 static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -1756,7 +1756,7 @@ struct cfg80211_sar_specs {


 /**
- * @struct cfg80211_sar_chan_ranges - sar frequency ranges
+ * struct cfg80211_sar_freq_ranges - sar frequency ranges
  * @start_freq: start range edge frequency
  * @end_freq: end range edge frequency
  */
@@ -3972,6 +3972,8 @@ struct mgmt_frame_regs {
  *	This callback may sleep.
  * @reset_tid_config: Reset TID specific configuration for the peer, for the
  *	given TIDs. This callback may sleep.
+ *
+ * @set_sar_specs: Update the SAR (TX power) settings.
  */
 struct cfg80211_ops {
 	int	(*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4929,6 +4931,7 @@ struct wiphy_iftype_akm_suites {
  * @max_data_retry_count: maximum supported per TID retry count for
  *	configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
  *	%NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
+ * @sar_capa: SAR control capabilities
  */
 struct wiphy {
 	/* assign these fields before you register the wiphy */
@@ -76,6 +76,8 @@ struct inet_connection_sock_af_ops {
 * @icsk_ext_hdr_len:	   Network protocol overhead (IP/IPv6 options)
 * @icsk_ack:		   Delayed ACK control data
 * @icsk_mtup;		   MTU probing control data
+ * @icsk_probes_tstamp:    Probe timestamp (cleared by non-zero window ack)
+ * @icsk_user_timeout:	   TCP_USER_TIMEOUT value
 */
 struct inet_connection_sock {
 	/* inet_sock has to be the first member! */
@@ -129,6 +131,7 @@ struct inet_connection_sock {

 		u32		  probe_timestamp;
 	} icsk_mtup;
+	u32			  icsk_probes_tstamp;
 	u32			  icsk_user_timeout;

 	u64			  icsk_ca_priv[104 / sizeof(u64)];
@@ -3880,6 +3880,7 @@ enum ieee80211_reconfig_type {
 *	This callback may sleep.
 * @sta_set_4addr: Called to notify the driver when a station starts/stops using
 *	4-address mode
+ * @set_sar_specs: Update the SAR (TX power) settings.
 */
 struct ieee80211_ops {
 	void (*tx)(struct ieee80211_hw *hw,
@@ -1921,10 +1921,13 @@ static inline void sk_set_txhash(struct sock *sk)
 	sk->sk_txhash = net_tx_rndhash();
 }

-static inline void sk_rethink_txhash(struct sock *sk)
+static inline bool sk_rethink_txhash(struct sock *sk)
 {
-	if (sk->sk_txhash)
+	if (sk->sk_txhash) {
 		sk_set_txhash(sk);
+		return true;
+	}
+	return false;
 }

 static inline struct dst_entry *
@@ -1947,12 +1950,10 @@ sk_dst_get(struct sock *sk)
 	return dst;
 }

-static inline void dst_negative_advice(struct sock *sk)
+static inline void __dst_negative_advice(struct sock *sk)
 {
 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

-	sk_rethink_txhash(sk);
-
 	if (dst && dst->ops->negative_advice) {
 		ndst = dst->ops->negative_advice(dst);

@@ -1964,6 +1965,12 @@ static inline void dst_negative_advice(struct sock *sk)
 	}
 }

+static inline void dst_negative_advice(struct sock *sk)
+{
+	sk_rethink_txhash(sk);
+	__dst_negative_advice(sk);
+}
+
 static inline void
 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
@@ -176,14 +176,14 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
 	 * bpf_local_storage_update expects the owner to have a
 	 * valid storage pointer.
 	 */
-	if (!inode_storage_ptr(inode))
+	if (!inode || !inode_storage_ptr(inode))
 		return (unsigned long)NULL;

 	sdata = inode_storage_lookup(inode, map, true);
 	if (sdata)
 		return (unsigned long)sdata->data;

-	/* This helper must only called from where the inode is gurranteed
+	/* This helper must only called from where the inode is guaranteed
 	 * to have a refcount and cannot be freed.
 	 */
 	if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
@@ -200,7 +200,10 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
 BPF_CALL_2(bpf_inode_storage_delete,
 	   struct bpf_map *, map, struct inode *, inode)
 {
-	/* This helper must only called from where the inode is gurranteed
+	if (!inode)
+		return -EINVAL;
+
+	/* This helper must only called from where the inode is guaranteed
 	 * to have a refcount and cannot be freed.
 	 */
 	return inode_storage_delete(inode, map);
@@ -218,7 +218,7 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
 	 * bpf_local_storage_update expects the owner to have a
 	 * valid storage pointer.
 	 */
-	if (!task_storage_ptr(task))
+	if (!task || !task_storage_ptr(task))
 		return (unsigned long)NULL;

 	sdata = task_storage_lookup(task, map, true);
@@ -243,6 +243,9 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
 BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
 	   task)
 {
+	if (!task)
+		return -EINVAL;
+
 	/* This helper must only be called from places where the lifetime of the task
 	 * is guaranteed. Either by being refcounted or by being protected
 	 * by an RCU read-side critical section.
@@ -4172,7 +4172,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env)
 		return -ENOTSUPP;
 	}

-	if (btf_data_size == hdr->hdr_len) {
+	if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
 		btf_verifier_log(env, "No data");
 		return -EINVAL;
 	}
@@ -1391,11 +1391,12 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 		if (ctx.optlen != 0) {
 			*optlen = ctx.optlen;
 			*kernel_optval = ctx.optval;
+			/* export and don't free sockopt buf */
+			return 0;
 		}
 	}

 out:
-	if (ret)
-		sockopt_free_buf(&ctx);
+	sockopt_free_buf(&ctx);
 	return ret;
 }
@@ -108,7 +108,7 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
 }

 const struct bpf_func_proto bpf_map_peek_elem_proto = {
-	.func		= bpf_map_pop_elem,
+	.func		= bpf_map_peek_elem,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
@@ -2712,7 +2712,6 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
 out_put_prog:
 	if (tgt_prog_fd && tgt_prog)
 		bpf_prog_put(tgt_prog);
-	bpf_prog_put(prog);
 	return err;
 }

@@ -2825,7 +2824,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
 			tp_name = prog->aux->attach_func_name;
 			break;
 		}
-		return bpf_tracing_prog_attach(prog, 0, 0);
+		err = bpf_tracing_prog_attach(prog, 0, 0);
+		if (err >= 0)
+			return err;
+		goto out_put_prog;
 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
 		if (strncpy_from_user(buf,
@@ -2217,6 +2217,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	case PTR_TO_RDWR_BUF:
 	case PTR_TO_RDWR_BUF_OR_NULL:
 	case PTR_TO_PERCPU_BTF_ID:
+	case PTR_TO_MEM:
+	case PTR_TO_MEM_OR_NULL:
 		return true;
 	default:
 		return false;
@@ -5311,7 +5313,7 @@ static bool signed_add_overflows(s64 a, s64 b)
 	return res < a;
 }

-static bool signed_add32_overflows(s64 a, s64 b)
+static bool signed_add32_overflows(s32 a, s32 b)
 {
 	/* Do the add in u32, where overflow is well-defined */
 	s32 res = (s32)((u32)a + (u32)b);
@@ -5321,7 +5323,7 @@ static bool signed_add32_overflows(s64 a, s64 b)
 	return res < a;
 }

-static bool signed_sub_overflows(s32 a, s32 b)
+static bool signed_sub_overflows(s64 a, s64 b)
 {
 	/* Do the sub in u64, where overflow is well-defined */
 	s64 res = (s64)((u64)a - (u64)b);
@@ -5333,7 +5335,7 @@ static bool signed_sub_overflows(s32 a, s32 b)

 static bool signed_sub32_overflows(s32 a, s32 b)
 {
-	/* Do the sub in u64, where overflow is well-defined */
+	/* Do the sub in u32, where overflow is well-defined */
 	s32 res = (s32)((u32)a - (u32)b);

 	if (b < 0)
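The verifier's overflow helpers work by doing the arithmetic in unsigned
integers (where wraparound is well-defined) and then checking the sign
relation: adding a negative b must make the result smaller, adding a
non-negative b must not. The bug fixed above was only in the prototypes
(s64 vs s32), not in this logic. A minimal standalone sketch of the 32-bit
add check, assuming the usual wrapping cast behavior of GCC/Clang:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool signed_add32_overflows(int32_t a, int32_t b)
{
	/* do the add in u32, where overflow is well-defined */
	int32_t res = (int32_t)((uint32_t)a + (uint32_t)b);

	if (b < 0)
		return res > a;
	return res < a;
}

int main(void)
{
	printf("%d\n", signed_add32_overflows(INT32_MAX, 1));	/* 1: overflow */
	printf("%d\n", signed_add32_overflows(-5, 3));		/* 0: fine */
	return 0;
}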
@@ -272,7 +272,8 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
 	    kattr->test.repeat)
 		return -EINVAL;

-	if (ctx_size_in < prog->aux->max_ctx_offset)
+	if (ctx_size_in < prog->aux->max_ctx_offset ||
+	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
 		return -EINVAL;

 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
@@ -9672,6 +9672,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
 		}
 	}

+	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
+		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
+		features &= ~NETIF_F_HW_TLS_RX;
+	}
+
 	return features;
 }
@@ -4146,7 +4146,7 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
 static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
 					      struct genl_info *info)
 {
-	struct devlink_port *devlink_port = info->user_ptr[0];
+	struct devlink_port *devlink_port = info->user_ptr[1];
 	struct devlink_param_item *param_item;
 	struct sk_buff *msg;
 	int err;
@@ -4175,7 +4175,7 @@ static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
 static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
 					      struct genl_info *info)
 {
-	struct devlink_port *devlink_port = info->user_ptr[0];
+	struct devlink_port *devlink_port = info->user_ptr[1];

 	return __devlink_nl_cmd_param_set_doit(devlink_port->devlink,
 					       devlink_port->index,
@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t)
 	u64 rate, brate;

 	est_fetch_counters(est, &b);
-	brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
-	brate -= (est->avbps >> est->ewma_log);
+	brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+	brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);

-	rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
-	rate -= (est->avpps >> est->ewma_log);
+	rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+	rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);

 	write_seqcount_begin(&est->seq);
 	est->avbps += brate;
@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	if (parm->interval < -2 || parm->interval > 3)
 		return -EINVAL;

+	if (parm->ewma_log == 0 || parm->ewma_log >= 31)
+		return -EINVAL;
+
 	est = kzalloc(sizeof(*est), GFP_KERNEL);
 	if (!est)
 		return -ENOBUFS;
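The estimator keeps a fixed-point exponentially weighted moving average:
avg += (sample - avg) / 2^ewma_log. The old code folded ewma_log into the left
shift, which goes wrong for large ewma_log values; the fix shifts the sample
and the average down separately (and rejects unusable ewma_log values at setup).
A hedged standalone sketch of the corrected update step:

#include <stdint.h>
#include <stdio.h>

/* avg += (sample - avg) / 2^w, computed as avg + (sample >> w) - (avg >> w)
 * so no intermediate needs a shift count derived from (constant - w).
 */
static uint64_t ewma_step(uint64_t avg, uint64_t sample, unsigned int w)
{
	return avg + ((sample >> w) - (avg >> w));
}

int main(void)
{
	uint64_t avg = 0;

	/* feed a constant-rate sample; the average converges toward it */
	for (int i = 0; i < 64; i++)
		avg = ewma_step(avg, 1000000, 3);
	printf("avg ~= %llu\n", (unsigned long long)avg);
	return 0;
}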
@@ -437,7 +437,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,

 	len += NET_SKB_PAD;

-	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+	/* If requested length is either too small or too big,
+	 * we use kmalloc() for skb->head allocation.
+	 */
+	if (len <= SKB_WITH_OVERHEAD(1024) ||
+	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
 		if (!skb)
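The new condition routes both extremes to kmalloc(): tiny heads waste page
fragment space, oversized ones do not fit a page fragment at all. A rough
userspace model of the bounds check; the overhead constant is an assumption
standing in for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), not the
kernel's exact value:

#include <stdio.h>

#define PAGE_SIZE 4096
#define OVERHEAD  320	/* assumed stand-in for the shared-info overhead */
#define WITH_OVERHEAD(x) ((x) - OVERHEAD)

static const char *alloc_path(unsigned int len)
{
	if (len <= WITH_OVERHEAD(1024) || len > WITH_OVERHEAD(PAGE_SIZE))
		return "kmalloc";
	return "page_frag";
}

int main(void)
{
	unsigned int sizes[] = { 64, 600, 1500, 9000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%u -> %s\n", sizes[i], alloc_path(sizes[i]));
	return 0;
}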
@@ -851,6 +851,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 		newicsk->icsk_retransmits = 0;
 		newicsk->icsk_backoff	  = 0;
 		newicsk->icsk_probes_out  = 0;
+		newicsk->icsk_probes_tstamp = 0;

 		/* Deinitialize accept_queue to trap illegal accesses. */
 		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
@@ -76,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	flow.daddr = iph->saddr;
 	flow.saddr = rpfilter_get_saddr(iph->daddr);
 	flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
-	flow.flowi4_tos = RT_TOS(iph->tos);
+	flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
 	flow.flowi4_scope = RT_SCOPE_UNIVERSE;
 	flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
@@ -2937,6 +2937,7 @@ int tcp_disconnect(struct sock *sk, int flags)

 	icsk->icsk_backoff = 0;
 	icsk->icsk_probes_out = 0;
+	icsk->icsk_probes_tstamp = 0;
 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
 	icsk->icsk_rto_min = TCP_RTO_MIN;
 	icsk->icsk_delack_max = TCP_DELACK_MAX;
@@ -3384,6 +3384,7 @@ static void tcp_ack_probe(struct sock *sk)
 		return;
 	if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
 		icsk->icsk_backoff = 0;
+		icsk->icsk_probes_tstamp = 0;
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
 		/* Socket must be waked up by subsequent tcp_data_snd_check().
 		 * This function is not for random using!
@@ -4396,11 +4397,10 @@ static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
 	 * The receiver remembers and reflects via DSACKs. Leverage the
 	 * DSACK state and change the txhash to re-route speculatively.
 	 */
-	if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) {
-		sk_rethink_txhash(sk);
+	if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq &&
+	    sk_rethink_txhash(sk))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
-	}
 }

 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 {
@@ -1595,6 +1595,8 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 		tcp_move_syn(newtp, req);
 		ireq->ireq_opt = NULL;
 	} else {
+		newinet->inet_opt = NULL;
+
 		if (!req_unhash && found_dup_sk) {
 			/* This code path should only be executed in the
 			 * syncookie case only
@@ -1602,8 +1604,6 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 			bh_unlock_sock(newsk);
 			sock_put(newsk);
 			newsk = NULL;
-		} else {
-			newinet->inet_opt = NULL;
 		}
 	}
 	return newsk;
@@ -1760,6 +1760,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
+	u32 tail_gso_size, tail_gso_segs;
 	struct skb_shared_info *shinfo;
 	const struct tcphdr *th;
 	struct tcphdr *thtail;
@@ -1767,6 +1768,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	unsigned int hdrlen;
 	bool fragstolen;
 	u32 gso_segs;
+	u32 gso_size;
 	int delta;

 	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
@@ -1792,13 +1794,6 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	 */
 	th = (const struct tcphdr *)skb->data;
 	hdrlen = th->doff * 4;
-	shinfo = skb_shinfo(skb);
-
-	if (!shinfo->gso_size)
-		shinfo->gso_size = skb->len - hdrlen;
-
-	if (!shinfo->gso_segs)
-		shinfo->gso_segs = 1;

 	tail = sk->sk_backlog.tail;
 	if (!tail)
@@ -1821,6 +1816,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 		goto no_coalesce;

 	__skb_pull(skb, hdrlen);
+
+	shinfo = skb_shinfo(skb);
+	gso_size = shinfo->gso_size ?: skb->len;
+	gso_segs = shinfo->gso_segs ?: 1;
+
+	shinfo = skb_shinfo(tail);
+	tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
+	tail_gso_segs = shinfo->gso_segs ?: 1;
+
 	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;

@@ -1847,11 +1851,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 		}

 		/* Not as strict as GRO. We only need to carry mss max value */
-		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
-						 skb_shinfo(tail)->gso_size);
-
-		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
-		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+		shinfo->gso_size = max(gso_size, tail_gso_size);
+		shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);

 		sk->sk_backlog.len += delta;
 		__NET_INC_STATS(sock_net(sk),
@@ -4084,6 +4084,7 @@ void tcp_send_probe0(struct sock *sk)
 		/* Cancel probe timer, if it is not required. */
 		icsk->icsk_probes_out = 0;
 		icsk->icsk_backoff = 0;
+		icsk->icsk_probes_tstamp = 0;
 		return;
 	}
@@ -219,14 +219,8 @@ static int tcp_write_timeout(struct sock *sk)
 	int retry_until;

 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-		if (icsk->icsk_retransmits) {
-			dst_negative_advice(sk);
-		} else {
-			sk_rethink_txhash(sk);
-			tp->timeout_rehash++;
-			__NET_INC_STATS(sock_net(sk),
-					LINUX_MIB_TCPTIMEOUTREHASH);
-		}
+		if (icsk->icsk_retransmits)
+			__dst_negative_advice(sk);
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
 		expired = icsk->icsk_retransmits >= retry_until;
 	} else {
@@ -234,12 +228,7 @@ static int tcp_write_timeout(struct sock *sk)
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);

-			dst_negative_advice(sk);
-		} else {
-			sk_rethink_txhash(sk);
-			tp->timeout_rehash++;
-			__NET_INC_STATS(sock_net(sk),
-					LINUX_MIB_TCPTIMEOUTREHASH);
+			__dst_negative_advice(sk);
 		}

 		retry_until = net->ipv4.sysctl_tcp_retries2;
@@ -270,6 +259,11 @@ static int tcp_write_timeout(struct sock *sk)
 		return 1;
 	}

+	if (sk_rethink_txhash(sk)) {
+		tp->timeout_rehash++;
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
+	}
+
 	return 0;
 }

@@ -349,6 +343,7 @@ static void tcp_probe_timer(struct sock *sk)

 	if (tp->packets_out || !skb) {
 		icsk->icsk_probes_out = 0;
+		icsk->icsk_probes_tstamp = 0;
 		return;
 	}

@@ -360,13 +355,12 @@ static void tcp_probe_timer(struct sock *sk)
 	 * corresponding system limit. We also implement similar policy when
 	 * we use RTO to probe window in tcp_retransmit_timer().
 	 */
-	if (icsk->icsk_user_timeout) {
-		u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
-						tcp_probe0_base(sk));
-
-		if (elapsed >= icsk->icsk_user_timeout)
-			goto abort;
-	}
+	if (!icsk->icsk_probes_tstamp)
+		icsk->icsk_probes_tstamp = tcp_jiffies32;
+	else if (icsk->icsk_user_timeout &&
+		 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
+		 msecs_to_jiffies(icsk->icsk_user_timeout))
+		goto abort;

 	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
 	if (sock_flag(sk, SOCK_DEAD)) {
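The TCP_USER_TIMEOUT fix above measures real elapsed time from a recorded
timestamp instead of reconstructing it from the probe count, and it compares
tick counters the wraparound-safe way: (s32)(now - then) is the signed distance
between two u32 counters and stays correct across the 2^32 wrap, as long as the
true distance is under 2^31 ticks. A tiny self-contained illustration:

#include <stdint.h>
#include <stdio.h>

static int expired(uint32_t now, uint32_t then, uint32_t timeout)
{
	return (int32_t)(now - then) >= (int32_t)timeout;
}

int main(void)
{
	/* 'then' was recorded just before the counter wrapped */
	uint32_t then = 0xFFFFFFF0u, now = 0x00000010u;

	printf("elapsed=%d expired=%d\n",
	       (int32_t)(now - then), expired(now, then, 0x100));
	return 0;
}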
@@ -2555,7 +2555,8 @@ int udp_v4_early_demux(struct sk_buff *skb)
 		 */
 		if (!inet_sk(sk)->inet_daddr && in_dev)
 			return ip_mc_validate_source(skb, iph->daddr,
-						     iph->saddr, iph->tos,
+						     iph->saddr,
+						     iph->tos & IPTOS_RT_MASK,
 						     skb->dev, in_dev, &itag);
 	}
 	return 0;
@@ -2467,8 +2467,9 @@ static void addrconf_add_mroute(struct net_device *dev)
 		.fc_ifindex = dev->ifindex,
 		.fc_dst_len = 8,
 		.fc_flags = RTF_UP,
-		.fc_type = RTN_UNICAST,
+		.fc_type = RTN_MULTICAST,
 		.fc_nlinfo.nl_net = dev_net(dev),
+		.fc_protocol = RTPROT_KERNEL,
 	};

 	ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
@@ -120,18 +120,17 @@ static ssize_t aqm_write(struct file *file,
 {
 	struct ieee80211_local *local = file->private_data;
 	char buf[100];
-	size_t len;

-	if (count > sizeof(buf))
+	if (count >= sizeof(buf))
 		return -EINVAL;

 	if (copy_from_user(buf, user_buf, count))
 		return -EFAULT;

-	buf[sizeof(buf) - 1] = '\0';
-	len = strlen(buf);
-	if (len > 0 && buf[len-1] == '\n')
-		buf[len-1] = 0;
+	if (count && buf[count - 1] == '\n')
+		buf[count - 1] = '\0';
+	else
+		buf[count] = '\0';

 	if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
 		return count;
@@ -177,18 +176,17 @@ static ssize_t airtime_flags_write(struct file *file,
 {
 	struct ieee80211_local *local = file->private_data;
 	char buf[16];
-	size_t len;

-	if (count > sizeof(buf))
+	if (count >= sizeof(buf))
 		return -EINVAL;

 	if (copy_from_user(buf, user_buf, count))
 		return -EFAULT;

-	buf[sizeof(buf) - 1] = 0;
-	len = strlen(buf);
-	if (len > 0 && buf[len - 1] == '\n')
-		buf[len - 1] = 0;
+	if (count && buf[count - 1] == '\n')
+		buf[count - 1] = '\0';
+	else
+		buf[count] = '\0';

 	if (kstrtou16(buf, 0, &local->airtime_flags))
 		return -EINVAL;
@@ -237,20 +235,19 @@ static ssize_t aql_txq_limit_write(struct file *file,
 {
 	struct ieee80211_local *local = file->private_data;
 	char buf[100];
-	size_t len;
 	u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old;
 	struct sta_info *sta;

-	if (count > sizeof(buf))
+	if (count >= sizeof(buf))
 		return -EINVAL;

 	if (copy_from_user(buf, user_buf, count))
 		return -EFAULT;

-	buf[sizeof(buf) - 1] = 0;
-	len = strlen(buf);
-	if (len > 0 && buf[len - 1] == '\n')
-		buf[len - 1] = 0;
+	if (count && buf[count - 1] == '\n')
+		buf[count - 1] = '\0';
+	else
+		buf[count] = '\0';

 	if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3)
 		return -EINVAL;
@@ -306,18 +303,17 @@ static ssize_t force_tx_status_write(struct file *file,
 {
 	struct ieee80211_local *local = file->private_data;
 	char buf[3];
-	size_t len;

-	if (count > sizeof(buf))
+	if (count >= sizeof(buf))
 		return -EINVAL;

 	if (copy_from_user(buf, user_buf, count))
 		return -EFAULT;

-	buf[sizeof(buf) - 1] = '\0';
-	len = strlen(buf);
-	if (len > 0 && buf[len - 1] == '\n')
-		buf[len - 1] = 0;
+	if (count && buf[count - 1] == '\n')
+		buf[count - 1] = '\0';
+	else
+		buf[count] = '\0';

 	if (buf[0] == '0' && buf[1] == '\0')
 		local->force_tx_status = 0;
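All four debugfs handlers had the same bug: when count == sizeof(buf) the copy
fills the whole buffer, the buffer is never NUL-terminated, and strlen() reads
past it. The fix rejects writes with no room for the terminator and terminates
at exactly count bytes, eating one trailing newline. A userspace sketch of the
corrected pattern; parse_write(), "limit %u", and the memcpy standing in for
copy_from_user() are illustrative assumptions:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t parse_write(const char *src, size_t count, unsigned int *val)
{
	char buf[16];

	if (count >= sizeof(buf))	/* was '>': left buf unterminated */
		return -EINVAL;

	memcpy(buf, src, count);	/* stands in for copy_from_user() */

	if (count && buf[count - 1] == '\n')
		buf[count - 1] = '\0';
	else
		buf[count] = '\0';

	if (sscanf(buf, "limit %u", val) != 1)
		return -EINVAL;
	return (ssize_t)count;
}

int main(void)
{
	unsigned int v = 0;
	const char msg[] = "limit 42\n";

	printf("ret=%zd v=%u\n", parse_write(msg, sizeof(msg) - 1, &v), v);
	return 0;
}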
@@ -4176,6 +4176,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)

 		rcu_read_lock();
 		key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+		if (!key)
+			key = rcu_dereference(sdata->default_unicast_key);
 		if (key) {
 			switch (key->conf.cipher) {
 			case WLAN_CIPHER_SUITE_TKIP:
@@ -649,7 +649,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 		if (!skip_hw && tx->key &&
 		    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
 			info->control.hw_key = &tx->key->conf;
-	} else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+	} else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
 		   test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
 		return TX_DROP;
 	}
@@ -3809,7 +3809,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
 	 * get immediately moved to the back of the list on the next
 	 * call to ieee80211_next_txq().
 	 */
-	if (txqi->txq.sta &&
+	if (txqi->txq.sta && local->airtime_flags &&
 	    wiphy_ext_feature_isset(local->hw.wiphy,
 				    NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
 		list_add(&txqi->schedule_order,
@@ -4251,7 +4251,6 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
 	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
 	struct ieee80211_key *key;
 	struct sta_info *sta;
-	bool offload = true;

 	if (unlikely(skb->len < ETH_HLEN)) {
 		kfree_skb(skb);
@@ -4268,17 +4267,21 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
 	if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
 	    !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
 	    sdata->control_port_protocol == ehdr->h_proto))
-		offload = false;
-	else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) &&
-		 (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
-		  key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
-		offload = false;
+		goto skip_offload;
+
+	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+	if (!key)
+		key = rcu_dereference(sdata->default_unicast_key);
+
+	if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
+		    key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
+		goto skip_offload;

-	if (offload)
-		ieee80211_8023_xmit(sdata, dev, sta, key, skb);
-	else
-		ieee80211_subif_start_xmit(skb, dev);
+	ieee80211_8023_xmit(sdata, dev, sta, key, skb);
+	goto out;

+skip_offload:
+	ieee80211_subif_start_xmit(skb, dev);
+out:
 	rcu_read_unlock();
@@ -508,7 +508,7 @@ static int nci_open_device(struct nci_dev *ndev)
 	};
 	unsigned long opt = 0;

-	if (!(ndev->nci_ver & NCI_VER_2_MASK))
+	if (ndev->nci_ver & NCI_VER_2_MASK)
 		opt = (unsigned long)&nci_init_v2_cmd;

 	rc = __nci_request(ndev, nci_init_req, opt,
@@ -1272,6 +1272,10 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,

 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+		if (!nla_ok(nla_opt_msk, msk_depth)) {
+			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
+			return -EINVAL;
+		}
 	}

 	nla_for_each_attr(nla_opt_key, nla_enc_key,
@@ -1307,9 +1311,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
 				return -EINVAL;
 			}
-
-			if (msk_depth)
-				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
 			break;
 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
 			if (key->enc_opts.dst_opt_type) {
@@ -1340,9 +1341,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
 				return -EINVAL;
 			}
-
-			if (msk_depth)
-				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
 			break;
 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
 			if (key->enc_opts.dst_opt_type) {
@@ -1373,14 +1371,20 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
 				return -EINVAL;
 			}
-
-			if (msk_depth)
-				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
 			break;
 		default:
 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
 			return -EINVAL;
 		}
+
+		if (!msk_depth)
+			continue;
+
+		if (!nla_ok(nla_opt_msk, msk_depth)) {
+			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
+			return -EINVAL;
+		}
+		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
 	}

 	return 0;
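The cls_flower bug is the classic TLV-walk mistake: advancing with nla_next()
without first validating the remaining length with nla_ok(), so a truncated
mask blob walks off the end of the buffer. A generic self-contained sketch of
the same discipline (a hypothetical TLV format, not netlink's exact layout):
validate that a full header fits and that its length is sane before
dereferencing or advancing.

#include <stdint.h>
#include <stdio.h>

struct tlv {
	uint16_t len;	/* total length including this header */
	uint16_t type;
};

static int tlv_ok(const struct tlv *t, int remaining)
{
	return remaining >= (int)sizeof(*t) &&
	       t->len >= sizeof(*t) && t->len <= (unsigned int)remaining;
}

static const struct tlv *tlv_next(const struct tlv *t, int *remaining)
{
	*remaining -= t->len;
	return (const struct tlv *)((const char *)t + t->len);
}

int main(void)
{
	/* two 8-byte records packed back to back */
	struct tlv buf[4] = { { 8, 1 }, { 0, 0 }, { 8, 2 }, { 0, 0 } };
	int rem = sizeof(buf);

	for (const struct tlv *t = buf; tlv_ok(t, rem); t = tlv_next(t, &rem))
		printf("type=%u len=%u\n", t->type, t->len);
	return 0;
}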
@@ -366,9 +366,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	if (tb[TCA_TCINDEX_MASK])
 		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

-	if (tb[TCA_TCINDEX_SHIFT])
+	if (tb[TCA_TCINDEX_SHIFT]) {
 		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
+		if (cp->shift > 16) {
+			err = -EINVAL;
+			goto errout;
+		}
+	}
 	if (!cp->hash) {
 		/* Hash not specified, use perfect hash if the upper limit
 		 * of the hashing index is below the threshold.
@@ -412,7 +412,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 {
 	struct qdisc_rate_table *rtab;

-	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+	if (tab == NULL || r->rate == 0 ||
+	    r->cell_log == 0 || r->cell_log >= 32 ||
 	    nla_len(tab) != TC_RTAB_SIZE) {
 		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
 		return NULL;
@@ -5,7 +5,7 @@
 * Copyright 2008-2011	Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright 2017	Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
@@ -139,6 +139,11 @@ static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 	return rcu_dereference_rtnl(cfg80211_regdomain);
 }

+/*
+ * Returns the regulatory domain associated with the wiphy.
+ *
+ * Requires either RTNL or RCU protection
+ */
 const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
 {
 	return rcu_dereference_rtnl(wiphy->regd);
@@ -2571,9 +2576,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
 	if (IS_ERR(new_regd))
 		return;

+	rtnl_lock();
+
 	tmp = get_wiphy_regdom(wiphy);
 	rcu_assign_pointer(wiphy->regd, new_regd);
 	rcu_free_regdom(tmp);
+
+	rtnl_unlock();
 }
 EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);

 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 {
-	if (queue_id < dev->real_num_rx_queues)
+	if (queue_id < dev->num_rx_queues)
 		dev->_rx[queue_id].pool = NULL;
-	if (queue_id < dev->real_num_tx_queues)
+	if (queue_id < dev->num_tx_queues)
 		dev->_tx[queue_id].pool = NULL;
 }
@@ -240,11 +240,6 @@ static int btf_parse_hdr(struct btf *btf)
 	}

 	meta_left = btf->raw_size - sizeof(*hdr);
-	if (!meta_left) {
-		pr_debug("BTF has no data\n");
-		return -EINVAL;
-	}
-
 	if (meta_left < hdr->str_off + hdr->str_len) {
 		pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
 		return -EINVAL;
@@ -34,61 +34,6 @@ struct storage {
 	struct bpf_spin_lock lock;
 };

-/* Copies an rm binary to a temp file. dest is a mkstemp template */
-static int copy_rm(char *dest)
-{
-	int fd_in, fd_out = -1, ret = 0;
-	struct stat stat;
-	char *buf = NULL;
-
-	fd_in = open("/bin/rm", O_RDONLY);
-	if (fd_in < 0)
-		return -errno;
-
-	fd_out = mkstemp(dest);
-	if (fd_out < 0) {
-		ret = -errno;
-		goto out;
-	}
-
-	ret = fstat(fd_in, &stat);
-	if (ret == -1) {
-		ret = -errno;
-		goto out;
-	}
-
-	buf = malloc(stat.st_blksize);
-	if (!buf) {
-		ret = -errno;
-		goto out;
-	}
-
-	while (ret = read(fd_in, buf, stat.st_blksize), ret > 0) {
-		ret = write(fd_out, buf, ret);
-		if (ret < 0) {
-			ret = -errno;
-			goto out;
-		}
-	}
-	if (ret < 0) {
-		ret = -errno;
-		goto out;
-	}
-
-	/* Set executable permission on the copied file */
-	ret = chmod(dest, 0100);
-	if (ret == -1)
-		ret = -errno;
-
-out:
-	free(buf);
-	close(fd_in);
-	close(fd_out);
-	return ret;
-}
-
 /* Fork and exec the provided rm binary and return the exit code of the
 * forked process and its pid.
 */
@@ -168,9 +113,11 @@ static bool check_syscall_operations(int map_fd, int obj_fd)

 void test_test_local_storage(void)
 {
-	char tmp_exec_path[PATH_MAX] = "/tmp/copy_of_rmXXXXXX";
+	char tmp_dir_path[64] = "/tmp/local_storageXXXXXX";
 	int err, serv_sk = -1, task_fd = -1, rm_fd = -1;
 	struct local_storage *skel = NULL;
+	char tmp_exec_path[64];
+	char cmd[256];

 	skel = local_storage__open_and_load();
 	if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
@@ -189,18 +136,24 @@ void test_test_local_storage(void)
 		  task_fd))
 		goto close_prog;

-	err = copy_rm(tmp_exec_path);
-	if (CHECK(err < 0, "copy_rm", "err %d errno %d\n", err, errno))
+	if (CHECK(!mkdtemp(tmp_dir_path), "mkdtemp",
+		  "unable to create tmpdir: %d\n", errno))
 		goto close_prog;

+	snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm",
+		 tmp_dir_path);
+	snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path);
+	if (CHECK_FAIL(system(cmd)))
+		goto close_prog_rmdir;
+
 	rm_fd = open(tmp_exec_path, O_RDONLY);
 	if (CHECK(rm_fd < 0, "open", "failed to open %s err:%d, errno:%d",
 		  tmp_exec_path, rm_fd, errno))
-		goto close_prog;
+		goto close_prog_rmdir;

 	if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map),
 				      rm_fd))
-		goto close_prog;
+		goto close_prog_rmdir;

 	/* Sets skel->bss->monitored_pid to the pid of the forked child
 	 * forks a child process that executes tmp_exec_path and tries to
@@ -209,33 +162,36 @@ void test_test_local_storage(void)
 	 */
 	err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path);
 	if (CHECK(err != EPERM, "run_self_unlink", "err %d want EPERM\n", err))
-		goto close_prog_unlink;
+		goto close_prog_rmdir;

 	/* Set the process being monitored to be the current process */
 	skel->bss->monitored_pid = getpid();

-	/* Remove the temporary created executable */
-	err = unlink(tmp_exec_path);
-	if (CHECK(err != 0, "unlink", "unable to unlink %s: %d", tmp_exec_path,
-		  errno))
-		goto close_prog_unlink;
+	/* Move copy_of_rm to a new location so that it triggers the
+	 * inode_rename LSM hook with a new_dentry that has a NULL inode ptr.
+	 */
+	snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr",
+		 tmp_dir_path, tmp_dir_path);
+	if (CHECK_FAIL(system(cmd)))
+		goto close_prog_rmdir;

 	CHECK(skel->data->inode_storage_result != 0, "inode_storage_result",
 	      "inode_local_storage not set\n");

 	serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
 	if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
-		goto close_prog;
+		goto close_prog_rmdir;

 	CHECK(skel->data->sk_storage_result != 0, "sk_storage_result",
 	      "sk_local_storage not set\n");

 	if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map),
 				      serv_sk))
-		goto close_prog;
+		goto close_prog_rmdir;

-close_prog_unlink:
-	unlink(tmp_exec_path);
+close_prog_rmdir:
+	snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir_path);
+	system(cmd);
 close_prog:
 	close(serv_sk);
 	close(rm_fd);
@@ -50,7 +50,6 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
 	__u32 pid = bpf_get_current_pid_tgid() >> 32;
 	struct local_storage *storage;
 	bool is_self_unlink;
-	int err;

 	if (pid != monitored_pid)
 		return 0;
@@ -66,8 +65,27 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
 		return -EPERM;
 	}

-	storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
-					0);
-	if (!storage)
-		return 0;
-
+	return 0;
+}
+
+SEC("lsm/inode_rename")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+	     struct inode *new_dir, struct dentry *new_dentry,
+	     unsigned int flags)
+{
+	__u32 pid = bpf_get_current_pid_tgid() >> 32;
+	struct local_storage *storage;
+	int err;
+
+	/* new_dentry->d_inode can be NULL when the inode is renamed to a file
+	 * that did not exist before. The helper should be able to handle this
+	 * NULL pointer.
+	 */
+	bpf_inode_storage_get(&inode_storage_map, new_dentry->d_inode, 0,
+			      BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+	storage = bpf_inode_storage_get(&inode_storage_map, old_dentry->d_inode,
+					0, 0);
+	if (!storage)
+		return 0;
+
@@ -76,7 +94,7 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
 		inode_storage_result = -1;
 	bpf_spin_unlock(&storage->lock);

-	err = bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
+	err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
 	if (!err)
 		inode_storage_result = err;

@@ -133,37 +151,18 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
 	return 0;
 }

-SEC("lsm/file_open")
-int BPF_PROG(file_open, struct file *file)
-{
-	__u32 pid = bpf_get_current_pid_tgid() >> 32;
-	struct local_storage *storage;
-
-	if (pid != monitored_pid)
-		return 0;
-
-	if (!file->f_inode)
-		return 0;
-
-	storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
-					BPF_LOCAL_STORAGE_GET_F_CREATE);
-	if (!storage)
-		return 0;
-
-	bpf_spin_lock(&storage->lock);
-	storage->value = DUMMY_STORAGE_VALUE;
-	bpf_spin_unlock(&storage->lock);
-	return 0;
-}
-
 /* This uses the local storage to remember the inode of the binary that a
 * process was originally executing.
 */
 SEC("lsm/bprm_committed_creds")
 void BPF_PROG(exec, struct linux_binprm *bprm)
 {
 	__u32 pid = bpf_get_current_pid_tgid() >> 32;
 	struct local_storage *storage;

 	if (pid != monitored_pid)
 		return;

 	storage = bpf_task_storage_get(&task_storage_map,
 				       bpf_get_current_task_btf(), 0,
 				       BPF_LOCAL_STORAGE_GET_F_CREATE);
@@ -172,4 +171,13 @@ void BPF_PROG(exec, struct linux_binprm *bprm)
 		storage->exec_inode = bprm->file->f_inode;
 		bpf_spin_unlock(&storage->lock);
 	}
+
+	storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
+					0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!storage)
+		return;
+
+	bpf_spin_lock(&storage->lock);
+	storage->value = DUMMY_STORAGE_VALUE;
+	bpf_spin_unlock(&storage->lock);
 }
@@ -50,7 +50,7 @@
 #define MAX_INSNS	BPF_MAXINSNS
 #define MAX_TEST_INSNS	1000000
 #define MAX_FIXUPS	8
-#define MAX_NR_MAPS	20
+#define MAX_NR_MAPS	21
 #define MAX_TEST_RUNS	8
 #define POINTER_VALUE	0xcafe4all
 #define TEST_DATA_LEN	64
@@ -87,6 +87,7 @@ struct bpf_test {
 	int fixup_sk_storage_map[MAX_FIXUPS];
 	int fixup_map_event_output[MAX_FIXUPS];
 	int fixup_map_reuseport_array[MAX_FIXUPS];
+	int fixup_map_ringbuf[MAX_FIXUPS];
 	const char *errstr;
 	const char *errstr_unpriv;
 	uint32_t insn_processed;
@@ -640,6 +641,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 	int *fixup_sk_storage_map = test->fixup_sk_storage_map;
 	int *fixup_map_event_output = test->fixup_map_event_output;
 	int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
+	int *fixup_map_ringbuf = test->fixup_map_ringbuf;

 	if (test->fill_helper) {
 		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -817,6 +819,14 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 			fixup_map_reuseport_array++;
 		} while (*fixup_map_reuseport_array);
 	}
+	if (*fixup_map_ringbuf) {
+		map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
+					 0, 4096);
+		do {
+			prog[*fixup_map_ringbuf].imm = map_fds[20];
+			fixup_map_ringbuf++;
+		} while (*fixup_map_ringbuf);
+	}
 }

 struct libcap {
@@ -28,6 +28,36 @@
 	.result = ACCEPT,
 	.result_unpriv = ACCEPT,
 },
+{
+	"check valid spill/fill, ptr to mem",
+	.insns = {
+	/* reserve 8 byte ringbuf memory */
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_MOV64_IMM(BPF_REG_2, 8),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+	/* store a pointer to the reserved memory in R6 */
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	/* check whether the reservation was successful */
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+	/* spill R6(mem) into the stack */
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+	/* fill it back in R7 */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
+	/* should be able to access *(R7) = 0 */
+	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
+	/* submit the reserved ringbuf memory */
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+	BPF_MOV64_IMM(BPF_REG_2, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_ringbuf = { 1 },
+	.result = ACCEPT,
+	.result_unpriv = ACCEPT,
+},
 {
 	"check corrupted spill/fill",
 	.insns = {
@@ -1055,7 +1055,6 @@ ipv6_addr_metric_test()

 	check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
-	log_test $? 0 "Set metric with peer route on local side"
 	log_test $? 0 "User specified metric on local address"
 	check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
 	log_test $? 0 "Set metric with peer route on peer side"