Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Fixes all over:

   1) Netdev refcnt leak in nf_flow_table, from Taehee Yoo.

   2) Fix RCU usage in nf_tables, from Florian Westphal.

   3) Fix DSA build when NET_DSA_TAG_BRCM_PREPEND is not set, from
      Yue Haibing.

   4) Add missing page read/write ops to realtek driver, from Heiner
      Kallweit.

   5) Endianness fix in qrtr code, from Nicholas Mc Guire.

   6) Fix various bugs in DSA_SKB_* macros, from Vladimir Oltean.

   7) Several BPF documentation cures, from Quentin Monnet.

   8) Fix undefined behavior in narrow load handling of BPF verifier,
      from Krzesimir Nowak.

   9) DMA ops crash in SGI Seeq driver due to not set netdev parent
      device pointer, from Thomas Bogendoerfer.

  10) Flow dissector has to disable preemption when invoking BPF
      program, from Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (48 commits)
  net: ethernet: stmmac: dwmac-sun8i: enable support of unicast filtering
  net: ethernet: ti: netcp_ethss: fix build
  flow_dissector: disable preemption around BPF calls
  bonding: fix arp_validate toggling in active-backup mode
  net: meson: fixup g12a glue ephy id
  net: phy: realtek: Replace phy functions with non-locked version in rtl8211e_config_init()
  net: seeq: fix crash caused by not set dev.parent
  of_net: Fix missing of_find_device_by_node ref count drop
  net: mvpp2: cls: Add missing NETIF_F_NTUPLE flag
  bpf: fix undefined behavior in narrow load handling
  libbpf: detect supported kernel BTF features and sanitize BTF
  selftests: bpf: Add files generated after build to .gitignore
  tools: bpf: synchronise BPF UAPI header with tools
  bpf: fix minor issues in documentation for BPF helpers.
  bpf: fix recurring typo in documentation for BPF helpers
  bpf: fix script for generating man page on BPF helpers
  bpf: add various test cases for backward jumps
  net: dccp : proto: remove Unneeded variable "err"
  net: dsa: Remove the now unused DSA_SKB_CB_COPY() macro
  net: dsa: Remove dangerous DSA_SKB_CLONE() macro
  ...
commit a3958f5e13
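Most of the driver hunks below are mechanical conversions to the current of_get_mac_address() calling convention, which returns either a valid pointer or an ERR_PTR() (never NULL), and to ether_addr_copy() in place of open-coded memcpy(). A minimal sketch of that consumer pattern follows; the function and variable names here are illustrative only and are not taken from any single driver in this merge.

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>

/* Hypothetical probe helper: take the MAC from DT if present, else randomize. */
static void example_set_mac(struct net_device *ndev, struct device_node *np)
{
	const void *mac = of_get_mac_address(np);	/* valid pointer or ERR_PTR() */

	if (!IS_ERR(mac))
		ether_addr_copy(ndev->dev_addr, mac);	/* copies ETH_ALEN bytes */
	else
		eth_hw_addr_random(ndev);		/* no usable DT address */
}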
@@ -139,9 +139,9 @@ Optional properties:
 	sub-module attached to this interface.
 
 The MAC address will be determined using the optional properties defined in
-ethernet.txt, as provided by the of_get_mac_address API and only if efuse-mac
-is set to 0. If any of the optional MAC address properties are not present,
-then the driver will use random MAC address.
+ethernet.txt and only if efuse-mac is set to 0. If all of the optional MAC
+address properties are not present, then the driver will use a random MAC
+address.
 
 Example binding:
 
@@ -16,8 +16,8 @@ Optional properties:
 - ieee80211-freq-limit: See ieee80211.txt
 - mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data
 
-The driver is using of_get_mac_address API, so the MAC address can be as well
-be set with corresponding optional properties defined in net/ethernet.txt.
+The MAC address can as well be set with corresponding optional properties
+defined in net/ethernet.txt.
 
 Optional nodes:
 - led: Properties for a connected LED
@@ -18,6 +18,7 @@
 #include <linux/irq.h>
 #include <linux/export.h>
 #include <linux/device.h>
+#include <linux/etherdevice.h>
 #include <linux/platform_device.h>
 #include <linux/of_net.h>
 #include <asm/tsi108.h>
@@ -106,7 +107,7 @@ static int __init tsi108_eth_of_init(void)
 
 	mac_addr = of_get_mac_address(np);
 	if (!IS_ERR(mac_addr))
-		memcpy(tsi_eth_data.mac_addr, mac_addr, 6);
+		ether_addr_copy(tsi_eth_data.mac_addr, mac_addr);
 
 	ph = of_get_property(np, "mdio-handle", NULL);
 	mdio = of_find_node_by_phandle(*ph);
@@ -1098,13 +1098,6 @@ static int bond_option_arp_validate_set(struct bonding *bond,
 {
 	netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
 		   newval->string, newval->value);
 
-	if (bond->dev->flags & IFF_UP) {
-		if (!newval->value)
-			bond->recv_probe = NULL;
-		else if (bond->params.arp_interval)
-			bond->recv_probe = bond_arp_rcv;
-	}
 	bond->params.arp_validate = newval->value;
 
 	return 0;
@@ -871,7 +871,7 @@ static int emac_probe(struct platform_device *pdev)
 	/* Read MAC-address from DT */
 	mac_addr = of_get_mac_address(np);
 	if (!IS_ERR(mac_addr))
-		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(ndev->dev_addr, mac_addr);
 
 	/* Check if the MAC address is valid, if not get a random one */
 	if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -961,7 +961,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
 	mac_addr = of_get_mac_address(dev->of_node);
 
 	if (!IS_ERR(mac_addr))
-		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(ndev->dev_addr, mac_addr);
 	else
 		eth_hw_addr_random(ndev);
 
@@ -1504,7 +1504,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
 	mac = of_get_mac_address(pdev->dev.of_node);
 
 	if (!IS_ERR(mac))
-		memcpy(netdev->dev_addr, mac, ETH_ALEN);
+		ether_addr_copy(netdev->dev_addr, mac);
 	else
 		eth_hw_addr_random(netdev);
 
@@ -1413,7 +1413,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
 
 	mac_addr = of_get_mac_address(np);
 	if (!IS_ERR(mac_addr))
-		memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
+		ether_addr_copy(pdata->dev_addr, mac_addr);
 
 	return pdata;
 }
@@ -903,7 +903,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
 	 */
 	mac_addr = of_get_mac_address(np);
 	if (!IS_ERR(mac_addr)) {
-		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(ndev->dev_addr, mac_addr);
 	} else {
 		struct mpc52xx_fec __iomem *fec = priv->fec;
 
@@ -729,7 +729,7 @@ static int mac_probe(struct platform_device *_of_dev)
 		err = -EINVAL;
 		goto _return_of_get_parent;
 	}
-	memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
+	ether_addr_copy(mac_dev->addr, mac_addr);
 
 	/* Get the port handles */
 	nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
@@ -1015,7 +1015,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
 	mac_addr = of_get_mac_address(ofdev->dev.of_node);
 	if (!IS_ERR(mac_addr))
-		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(ndev->dev_addr, mac_addr);
 
 	ret = fep->ops->allocate_bd(ndev);
 	if (ret)
@@ -873,7 +873,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 	mac_addr = of_get_mac_address(np);
 
 	if (!IS_ERR(mac_addr))
-		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(dev->dev_addr, mac_addr);
 
 	if (model && !strcasecmp(model, "TSEC"))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
@@ -3911,7 +3911,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
 
 	mac_addr = of_get_mac_address(np);
 	if (!IS_ERR(mac_addr))
-		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(dev->dev_addr, mac_addr);
 
 	ugeth->ug_info = ug_info;
 	ugeth->dev = device;
@@ -118,7 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 static int ibmvnic_init(struct ibmvnic_adapter *);
 static int ibmvnic_reset_init(struct ibmvnic_adapter *);
 static void release_crq_queue(struct ibmvnic_adapter *);
-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
+static int __ibmvnic_set_mac(struct net_device *, u8 *);
 static int init_crq_queue(struct ibmvnic_adapter *adapter);
 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
 
@@ -849,11 +849,7 @@ static int ibmvnic_login(struct net_device *netdev)
 		}
 	} while (retry);
 
-	/* handle pending MAC address changes after successful login */
-	if (adapter->mac_change_pending) {
-		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
-		adapter->mac_change_pending = false;
-	}
+	__ibmvnic_set_mac(netdev, adapter->mac_addr);
 
 	return 0;
 }
@@ -1115,7 +1111,6 @@ static int ibmvnic_open(struct net_device *netdev)
 	}
 
 	rc = __ibmvnic_open(netdev);
-	netif_carrier_on(netdev);
 
 	return rc;
 }
@@ -1686,28 +1681,40 @@ static void ibmvnic_set_multi(struct net_device *netdev)
 	}
 }
 
-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
+static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	struct sockaddr *addr = p;
 	union ibmvnic_crq crq;
 	int rc;
 
-	if (!is_valid_ether_addr(addr->sa_data))
-		return -EADDRNOTAVAIL;
+	if (!is_valid_ether_addr(dev_addr)) {
+		rc = -EADDRNOTAVAIL;
+		goto err;
+	}
 
 	memset(&crq, 0, sizeof(crq));
 	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
-	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
+	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
 
 	init_completion(&adapter->fw_done);
 	rc = ibmvnic_send_crq(adapter, &crq);
-	if (rc)
-		return rc;
+	if (rc) {
+		rc = -EIO;
+		goto err;
+	}
 
 	wait_for_completion(&adapter->fw_done);
 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
-	return adapter->fw_done_rc ? -EIO : 0;
+	if (adapter->fw_done_rc) {
+		rc = -EIO;
+		goto err;
+	}
+
+	return 0;
+err:
+	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
+	return rc;
 }
 
 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
@@ -1716,13 +1723,10 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 	struct sockaddr *addr = p;
 	int rc;
 
-	if (adapter->state == VNIC_PROBED) {
-		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
-		adapter->mac_change_pending = true;
-		return 0;
-	}
-
-	rc = __ibmvnic_set_mac(netdev, addr);
+	rc = 0;
+	ether_addr_copy(adapter->mac_addr, addr->sa_data);
+	if (adapter->state != VNIC_PROBED)
+		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
 
 	return rc;
 }
@@ -1859,8 +1863,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
 		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
 
-	netif_carrier_on(netdev);
-
 	return 0;
 }
 
@@ -1930,8 +1932,6 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
 		return 0;
 	}
 
-	netif_carrier_on(netdev);
-
 	return 0;
 }
 
@@ -3937,8 +3937,8 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
 		goto out;
 	}
-	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
-	       ETH_ALEN);
+	ether_addr_copy(netdev->dev_addr,
+			&crq->change_mac_addr_rsp.mac_addr[0]);
 out:
 	complete(&adapter->fw_done);
 	return rc;
@@ -4475,6 +4475,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 			crq->link_state_indication.phys_link_state;
 		adapter->logical_link_state =
 			crq->link_state_indication.logical_link_state;
+		if (adapter->phys_link_state && adapter->logical_link_state)
+			netif_carrier_on(netdev);
+		else
+			netif_carrier_off(netdev);
 		break;
 	case CHANGE_MAC_ADDR_RSP:
 		netdev_dbg(netdev, "Got MAC address change Response\n");
@@ -4852,8 +4856,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	init_completion(&adapter->init_done);
 	adapter->resetting = false;
 
-	adapter->mac_change_pending = false;
-
 	do {
 		rc = init_crq_queue(adapter);
 		if (rc) {
@@ -969,7 +969,6 @@ struct ibmvnic_tunables {
 	u64 rx_entries;
 	u64 tx_entries;
 	u64 mtu;
-	struct sockaddr mac;
 };
 
 struct ibmvnic_adapter {
@@ -1091,7 +1090,6 @@ struct ibmvnic_adapter {
 	bool resetting;
 	bool napi_enabled, from_passive_init;
 
-	bool mac_change_pending;
 	bool failover_pending;
 	bool force_reset_recovery;
 
@@ -2750,7 +2750,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 
 	mac_addr = of_get_mac_address(pnp);
 	if (!IS_ERR(mac_addr))
-		memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(ppd.mac_addr, mac_addr);
 
 	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
 	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -4565,7 +4565,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	dt_mac_addr = of_get_mac_address(dn);
 	if (!IS_ERR(dt_mac_addr)) {
 		mac_from = "device tree";
-		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
+		ether_addr_copy(dev->dev_addr, dt_mac_addr);
 	} else {
 		mvneta_get_mac_addr(pp, hw_mac_addr);
 		if (is_valid_ether_addr(hw_mac_addr)) {
@@ -5058,8 +5058,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
 			    NETIF_F_HW_VLAN_CTAG_FILTER;
 
-	if (mvpp22_rss_is_supported())
+	if (mvpp22_rss_is_supported()) {
 		dev->hw_features |= NETIF_F_RXHASH;
+		dev->features |= NETIF_F_NTUPLE;
+	}
 
 	if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
 		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
@@ -4805,7 +4805,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
 	 */
 	iap = of_get_mac_address(hw->pdev->dev.of_node);
 	if (!IS_ERR(iap))
-		memcpy(dev->dev_addr, iap, ETH_ALEN);
+		ether_addr_copy(dev->dev_addr, iap);
 	else
 		memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
 			      ETH_ALEN);
@@ -426,7 +426,7 @@ static void ks8851_init_mac(struct ks8851_net *ks)
 
 	mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
 	if (!IS_ERR(mac_addr)) {
-		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(dev->dev_addr, mac_addr);
 		ks8851_write_mac_addr(dev);
 		return;
 	}
@@ -1328,7 +1328,7 @@ static int ks8851_probe(struct platform_device *pdev)
 	if (pdev->dev.of_node) {
 		mac = of_get_mac_address(pdev->dev.of_node);
 		if (!IS_ERR(mac))
-			memcpy(ks->mac_addr, mac, ETH_ALEN);
+			ether_addr_copy(ks->mac_addr, mac);
 	} else {
 		struct ks8851_mll_platform_data *pdata;
 
@@ -1369,7 +1369,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
 	if (!is_valid_ether_addr(ndev->dev_addr)) {
 		const char *macaddr = of_get_mac_address(np);
 		if (!IS_ERR(macaddr))
-			memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
+			ether_addr_copy(ndev->dev_addr, macaddr);
 	}
 	if (!is_valid_ether_addr(ndev->dev_addr))
 		eth_hw_addr_random(ndev);
@@ -3193,7 +3193,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
 
 	mac_addr = of_get_mac_address(np);
 	if (!IS_ERR(mac_addr))
-		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(pdata->mac_addr, mac_addr);
 
 	pdata->no_ether_link =
 		of_property_read_bool(np, "renesas,no-ether-link");
@@ -735,6 +735,7 @@ static int sgiseeq_probe(struct platform_device *pdev)
 	}
 
 	platform_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	sp = netdev_priv(dev);
 
 	/* Make private data page aligned */
@@ -1015,6 +1015,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
 	mac->mac = &sun8i_dwmac_ops;
 	mac->dma = &sun8i_dwmac_dma_ops;
 
+	priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
 	/* The loopback bit seems to be re-set when link change
 	 * Simply mask it each time
 	 * Speed 10/100/1000 are set in BIT(2)/BIT(3)
@@ -19,4 +19,4 @@ ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtoo
 obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
 keystone_netcp-y := netcp_core.o cpsw_ale.o
 obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
-keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o
+keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
@@ -2233,7 +2233,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 no_phy_slave:
 	mac_addr = of_get_mac_address(slave_node);
 	if (!IS_ERR(mac_addr)) {
-		memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+		ether_addr_copy(slave_data->mac_addr, mac_addr);
 	} else {
 		ret = ti_cm_get_macid(&pdev->dev, i,
 				      slave_data->mac_addr);
@@ -361,7 +361,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
 
 static int temac_init_mac_address(struct net_device *ndev, const void *address)
 {
-	memcpy(ndev->dev_addr, address, ETH_ALEN);
+	ether_addr_copy(ndev->dev_addr, address);
 	if (!is_valid_ether_addr(ndev->dev_addr))
 		eth_hw_addr_random(ndev);
 	temac_do_set_mac_address(ndev);
@@ -1167,7 +1167,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 
 	if (!IS_ERR(mac_address)) {
 		/* Set the MAC address. */
-		memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
+		ether_addr_copy(ndev->dev_addr, mac_address);
 	} else {
 		dev_warn(dev, "No MAC address found, using random\n");
 		eth_hw_addr_random(ndev);
@@ -33,7 +33,7 @@
 #define ETH_PLL_CTL7	0x60
 
 #define ETH_PHY_CNTL0	0x80
-#define EPHY_G12A_ID	0x33000180
+#define EPHY_G12A_ID	0x33010180
 #define ETH_PHY_CNTL1	0x84
 #define PHY_CNTL1_ST_MODE	GENMASK(2, 0)
 #define PHY_CNTL1_ST_PHYADD	GENMASK(7, 3)
@@ -217,12 +217,12 @@ static int rtl8211e_config_init(struct phy_device *phydev)
 	if (oldpage < 0)
 		goto err_restore_page;
 
-	ret = phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4);
+	ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4);
 	if (ret)
 		goto err_restore_page;
 
-	ret = phy_modify(phydev, 0x1c, RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
+	ret = __phy_modify(phydev, 0x1c, RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
 			 val);
 
 err_restore_page:
 	return phy_restore_page(phydev, oldpage, ret);
@@ -275,6 +275,8 @@ static struct phy_driver realtek_drvs[] = {
 		.config_aneg = rtl8211_config_aneg,
 		.read_mmd = &genphy_read_mmd_unsupported,
 		.write_mmd = &genphy_write_mmd_unsupported,
+		.read_page = rtl821x_read_page,
+		.write_page = rtl821x_write_page,
 	}, {
 		PHY_ID_MATCH_EXACT(0x001cc912),
 		.name = "RTL8211B Gigabit Ethernet",
@@ -284,12 +286,16 @@ static struct phy_driver realtek_drvs[] = {
 		.write_mmd = &genphy_write_mmd_unsupported,
 		.suspend = rtl8211b_suspend,
 		.resume = rtl8211b_resume,
+		.read_page = rtl821x_read_page,
+		.write_page = rtl821x_write_page,
 	}, {
 		PHY_ID_MATCH_EXACT(0x001cc913),
 		.name = "RTL8211C Gigabit Ethernet",
 		.config_init = rtl8211c_config_init,
 		.read_mmd = &genphy_read_mmd_unsupported,
 		.write_mmd = &genphy_write_mmd_unsupported,
+		.read_page = rtl821x_read_page,
+		.write_page = rtl821x_write_page,
 	}, {
 		PHY_ID_MATCH_EXACT(0x001cc914),
 		.name = "RTL8211DN Gigabit Ethernet",
@@ -297,6 +303,8 @@ static struct phy_driver realtek_drvs[] = {
 		.config_intr = rtl8211e_config_intr,
 		.suspend = genphy_suspend,
 		.resume = genphy_resume,
+		.read_page = rtl821x_read_page,
+		.write_page = rtl821x_write_page,
 	}, {
 		PHY_ID_MATCH_EXACT(0x001cc915),
 		.name = "RTL8211E Gigabit Ethernet",
@@ -305,6 +313,8 @@ static struct phy_driver realtek_drvs[] = {
 		.config_intr = &rtl8211e_config_intr,
 		.suspend = genphy_suspend,
 		.resume = genphy_resume,
+		.read_page = rtl821x_read_page,
+		.write_page = rtl821x_write_page,
 	}, {
 		PHY_ID_MATCH_EXACT(0x001cc916),
 		.name = "RTL8211F Gigabit Ethernet",
@@ -95,7 +95,7 @@ mt76_eeprom_override(struct mt76_dev *dev)
 
 	mac = of_get_mac_address(np);
 	if (!IS_ERR(mac))
-		memcpy(dev->macaddr, mac, ETH_ALEN);
+		ether_addr_copy(dev->macaddr, mac);
 #endif
 
 	if (!is_valid_ether_addr(dev->macaddr)) {
@@ -52,39 +52,25 @@ static const void *of_get_mac_addr(struct device_node *np, const char *name)
 static const void *of_get_mac_addr_nvmem(struct device_node *np)
 {
 	int ret;
-	u8 mac[ETH_ALEN];
-	struct property *pp;
+	const void *mac;
+	u8 nvmem_mac[ETH_ALEN];
 	struct platform_device *pdev = of_find_device_by_node(np);
 
 	if (!pdev)
 		return ERR_PTR(-ENODEV);
 
-	ret = nvmem_get_mac_address(&pdev->dev, &mac);
-	if (ret)
+	ret = nvmem_get_mac_address(&pdev->dev, &nvmem_mac);
+	if (ret) {
+		put_device(&pdev->dev);
 		return ERR_PTR(ret);
-
-	pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
-	if (!pp)
-		return ERR_PTR(-ENOMEM);
-
-	pp->name = "nvmem-mac-address";
-	pp->length = ETH_ALEN;
-	pp->value = devm_kmemdup(&pdev->dev, mac, ETH_ALEN, GFP_KERNEL);
-	if (!pp->value) {
-		ret = -ENOMEM;
-		goto free;
 	}
 
-	ret = of_add_property(np, pp);
-	if (ret)
-		goto free;
+	mac = devm_kmemdup(&pdev->dev, nvmem_mac, ETH_ALEN, GFP_KERNEL);
+	put_device(&pdev->dev);
+	if (!mac)
+		return ERR_PTR(-ENOMEM);
 
-	return pp->value;
-free:
-	devm_kfree(&pdev->dev, pp->value);
-	devm_kfree(&pdev->dev, pp);
-
-	return ERR_PTR(ret);
+	return mac;
 }
 
 /**
@@ -99,24 +99,9 @@ struct __dsa_skb_cb {
 
 #define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb))
 
-#define DSA_SKB_CB_COPY(nskb, skb)		\
-	{ *__DSA_SKB_CB(nskb) = *__DSA_SKB_CB(skb); }
-
-#define DSA_SKB_CB_ZERO(skb)			\
-	{ *__DSA_SKB_CB(skb) = (struct __dsa_skb_cb) {0}; }
-
 #define DSA_SKB_CB_PRIV(skb)			\
 	((void *)(skb)->cb + offsetof(struct __dsa_skb_cb, priv))
 
-#define DSA_SKB_CB_CLONE(_clone, _skb)		\
-	{					\
-		struct sk_buff *clone = _clone;	\
-		struct sk_buff *skb = _skb;	\
-						\
-		DSA_SKB_CB_COPY(clone, skb);	\
-		DSA_SKB_CB(skb)->clone = clone;	\
-	}
-
 struct dsa_switch_tree {
 	struct list_head list;
 
@@ -629,7 +629,7 @@ union bpf_attr {
  * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
  * **->swhash** and *skb*\ **->l4hash** to 0).
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -654,7 +654,7 @@ union bpf_attr {
  * flexibility and can handle sizes larger than 2 or 4 for the
  * checksum to update.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -686,7 +686,7 @@ union bpf_attr {
  * flexibility and can handle sizes larger than 2 or 4 for the
  * checksum to update.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -741,7 +741,7 @@ union bpf_attr {
  * efficient, but it is handled through an action code where the
  * redirection happens only after the eBPF program has returned.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -806,7 +806,7 @@ union bpf_attr {
  * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
  * be **ETH_P_8021Q**.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -818,7 +818,7 @@ union bpf_attr {
  * Description
  * Pop a VLAN header from the packet associated to *skb*.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -1168,7 +1168,7 @@ union bpf_attr {
  * All values for *flags* are reserved for future usage, and must
  * be left at zero.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -1281,7 +1281,7 @@ union bpf_attr {
  * implicitly linearizes, unclones and drops offloads from the
  * *skb*.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -1317,7 +1317,7 @@ union bpf_attr {
  * **bpf_skb_pull_data()** to effectively unclone the *skb* from
  * the very beginning in case it is indeed cloned.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -1369,7 +1369,7 @@ union bpf_attr {
  * All values for *flags* are reserved for future usage, and must
  * be left at zero.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -1384,7 +1384,7 @@ union bpf_attr {
  * can be used to prepare the packet for pushing or popping
  * headers.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -1518,20 +1518,20 @@ union bpf_attr {
  * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
  * Adjusting mss in this way is not allowed for datagrams.
  *
- * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 **:
- * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 **:
+ * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
+ * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
  * Any new space is reserved to hold a tunnel header.
  * Configure skb offsets and other fields accordingly.
  *
- * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE **:
- * * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **:
+ * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
+ * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
  * Use with ENCAP_L3 flags to further specify the tunnel type.
  *
- * * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **:
+ * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
  * Use with ENCAP_L3/L4 flags to further specify the tunnel
- * type; **len** is the length of the inner MAC header.
+ * type; *len* is the length of the inner MAC header.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -1610,7 +1610,7 @@ union bpf_attr {
  * more flexibility as the user is free to store whatever meta
  * data they need.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -1852,7 +1852,7 @@ union bpf_attr {
  * copied if necessary (i.e. if data was not linear and if start
  * and end pointers do not point to the same chunk).
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -1886,7 +1886,7 @@ union bpf_attr {
  * only possible to shrink the packet as of this writing,
  * therefore *delta* must be a negative integer.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -2061,18 +2061,18 @@ union bpf_attr {
  * **BPF_LWT_ENCAP_IP**
  * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
  * must be IPv4 or IPv6, followed by zero or more
- * additional headers, up to LWT_BPF_MAX_HEADROOM total
- * bytes in all prepended headers. Please note that
- * if skb_is_gso(skb) is true, no more than two headers
- * can be prepended, and the inner header, if present,
- * should be either GRE or UDP/GUE.
+ * additional headers, up to **LWT_BPF_MAX_HEADROOM**
+ * total bytes in all prepended headers. Please note that
+ * if **skb_is_gso**\ (*skb*) is true, no more than two
+ * headers can be prepended, and the inner header, if
+ * present, should be either GRE or UDP/GUE.
  *
- * BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of
- * type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called
- * by bpf programs of types BPF_PROG_TYPE_LWT_IN and
- * BPF_PROG_TYPE_LWT_XMIT.
+ * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
+ * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
+ * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
+ * **BPF_PROG_TYPE_LWT_XMIT**.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -2087,7 +2087,7 @@ union bpf_attr {
  * inside the outermost IPv6 Segment Routing Header can be
  * modified through this helper.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -2103,7 +2103,7 @@ union bpf_attr {
  * after the segments are accepted. *delta* can be as well
  * positive (growing) as negative (shrinking).
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -2126,13 +2126,13 @@ union bpf_attr {
  * Type of *param*: **int**.
  * **SEG6_LOCAL_ACTION_END_B6**
  * End.B6 action: Endpoint bound to an SRv6 policy.
- * Type of param: **struct ipv6_sr_hdr**.
+ * Type of *param*: **struct ipv6_sr_hdr**.
  * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
  * End.B6.Encap action: Endpoint bound to an SRv6
  * encapsulation policy.
- * Type of param: **struct ipv6_sr_hdr**.
+ * Type of *param*: **struct ipv6_sr_hdr**.
  *
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
  * packet buffer. Therefore, at load time, all checks on pointers
  * previously done by the verifier are invalidated and must be
  * performed again, if the helper is used in combination with
@@ -2285,7 +2285,8 @@ union bpf_attr {
  * Return
  * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
  * For sockets with reuseport option, the **struct bpf_sock**
- * result is from **reuse->socks**\ [] using the hash of the tuple.
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
  *
  * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  * Description
@@ -2321,7 +2322,8 @@ union bpf_attr {
  * Return
  * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
  * For sockets with reuseport option, the **struct bpf_sock**
- * result is from **reuse->socks**\ [] using the hash of the tuple.
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
  *
  * int bpf_sk_release(struct bpf_sock *sock)
  * Description
@@ -2490,31 +2492,34 @@ union bpf_attr {
  * network namespace *netns*. The return value must be checked,
  * and if non-**NULL**, released via **bpf_sk_release**\ ().
  *
- * This function is identical to bpf_sk_lookup_tcp, except that it
- * also returns timewait or request sockets. Use bpf_sk_fullsock
- * or bpf_tcp_socket to access the full structure.
+ * This function is identical to **bpf_sk_lookup_tcp**\ (), except
+ * that it also returns timewait or request sockets. Use
+ * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
+ * full structure.
  *
  * This helper is available only if the kernel was compiled with
  * **CONFIG_NET** configuration option.
  * Return
  * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
  * For sockets with reuseport option, the **struct bpf_sock**
- * result is from **reuse->socks**\ [] using the hash of the tuple.
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
  *
  * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
  * Description
- * Check whether iph and th contain a valid SYN cookie ACK for
- * the listening socket in sk.
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK for
+ * the listening socket in *sk*.
  *
- * iph points to the start of the IPv4 or IPv6 header, while
- * iph_len contains sizeof(struct iphdr) or sizeof(struct ip6hdr).
+ * *iph* points to the start of the IPv4 or IPv6 header, while
+ * *iph_len* contains **sizeof**\ (**struct iphdr**) or
+ * **sizeof**\ (**struct ip6hdr**).
  *
- * th points to the start of the TCP header, while th_len contains
- * sizeof(struct tcphdr).
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains **sizeof**\ (**struct tcphdr**).
  *
  * Return
- * 0 if iph and th are a valid SYN cookie ACK, or a negative error
- * otherwise.
+ * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
+ * error otherwise.
 *
  * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
  * Description
@ -2592,17 +2597,17 @@ union bpf_attr {
|
||||||
* and save the result in *res*.
|
* and save the result in *res*.
|
||||||
*
|
*
|
||||||
* The string may begin with an arbitrary amount of white space
|
* The string may begin with an arbitrary amount of white space
|
||||||
* (as determined by isspace(3)) followed by a single optional '-'
|
* (as determined by **isspace**\ (3)) followed by a single
|
||||||
* sign.
|
* optional '**-**' sign.
|
||||||
*
|
*
|
||||||
* Five least significant bits of *flags* encode base, other bits
|
* Five least significant bits of *flags* encode base, other bits
|
||||||
* are currently unused.
|
* are currently unused.
|
||||||
*
|
*
|
||||||
* Base must be either 8, 10, 16 or 0 to detect it automatically
|
* Base must be either 8, 10, 16 or 0 to detect it automatically
|
||||||
* similar to user space strtol(3).
|
* similar to user space **strtol**\ (3).
|
||||||
* Return
|
* Return
|
||||||
* Number of characters consumed on success. Must be positive but
|
* Number of characters consumed on success. Must be positive but
|
||||||
* no more than buf_len.
|
* no more than *buf_len*.
|
||||||
*
|
*
|
||||||
* **-EINVAL** if no valid digits were found or unsupported base
|
* **-EINVAL** if no valid digits were found or unsupported base
|
||||||
* was provided.
|
* was provided.
|
||||||
|
@ -2616,16 +2621,16 @@ union bpf_attr {
|
||||||
* given base and save the result in *res*.
|
* given base and save the result in *res*.
|
||||||
*
|
*
|
||||||
* The string may begin with an arbitrary amount of white space
|
* The string may begin with an arbitrary amount of white space
|
||||||
* (as determined by isspace(3)).
|
* (as determined by **isspace**\ (3)).
|
||||||
*
|
*
|
||||||
* Five least significant bits of *flags* encode base, other bits
|
* Five least significant bits of *flags* encode base, other bits
|
||||||
* are currently unused.
|
* are currently unused.
|
||||||
*
|
*
|
||||||
* Base must be either 8, 10, 16 or 0 to detect it automatically
|
* Base must be either 8, 10, 16 or 0 to detect it automatically
|
||||||
* similar to user space strtoul(3).
|
* similar to user space **strtoul**\ (3).
|
||||||
* Return
|
* Return
|
||||||
* Number of characters consumed on success. Must be positive but
|
* Number of characters consumed on success. Must be positive but
|
||||||
* no more than buf_len.
|
* no more than *buf_len*.
|
||||||
*
|
*
|
||||||
* **-EINVAL** if no valid digits were found or unsupported base
|
* **-EINVAL** if no valid digits were found or unsupported base
|
||||||
* was provided.
|
* was provided.
|
||||||
|
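The two conversion helpers above deliberately mirror the C library functions they are named after. As a user-space analogue (plain C, not BPF, buffer contents invented for the example) of the "base 0 auto-detection plus characters consumed" behaviour:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *buf = "  -0x1f trailing sysctl text";
	char *end;
	long val;

	val = strtol(buf, &end, 0);	/* base 0: detect 8, 10 or 16 */
	printf("value=%ld consumed=%td\n", val, end - buf);
	return 0;
}
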
@ -2634,26 +2639,26 @@ union bpf_attr {
|
||||||
*
|
*
|
||||||
* void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
|
* void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
|
||||||
* Description
|
* Description
|
||||||
* Get a bpf-local-storage from a sk.
|
* Get a bpf-local-storage from a *sk*.
|
||||||
*
|
*
|
||||||
* Logically, it could be thought of getting the value from
|
* Logically, it could be thought of getting the value from
|
||||||
* a *map* with *sk* as the **key**. From this
|
* a *map* with *sk* as the **key**. From this
|
||||||
* perspective, the usage is not much different from
|
* perspective, the usage is not much different from
|
||||||
* **bpf_map_lookup_elem(map, &sk)** except this
|
* **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
|
||||||
* helper enforces the key must be a **bpf_fullsock()**
|
* helper enforces the key must be a full socket and the map must
|
||||||
* and the map must be a BPF_MAP_TYPE_SK_STORAGE also.
|
* be a **BPF_MAP_TYPE_SK_STORAGE** also.
|
||||||
*
|
*
|
||||||
* Underneath, the value is stored locally at *sk* instead of
|
* Underneath, the value is stored locally at *sk* instead of
|
||||||
* the map. The *map* is used as the bpf-local-storage **type**.
|
* the *map*. The *map* is used as the bpf-local-storage
|
||||||
* The bpf-local-storage **type** (i.e. the *map*) is searched
|
* "type". The bpf-local-storage "type" (i.e. the *map*) is
|
||||||
* against all bpf-local-storages residing at sk.
|
* searched against all bpf-local-storages residing at *sk*.
|
||||||
*
|
*
|
||||||
* An optional *flags* (BPF_SK_STORAGE_GET_F_CREATE) can be
|
* An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
|
||||||
* used such that a new bpf-local-storage will be
|
* used such that a new bpf-local-storage will be
|
||||||
* created if one does not exist. *value* can be used
|
* created if one does not exist. *value* can be used
|
||||||
* together with BPF_SK_STORAGE_GET_F_CREATE to specify
|
* together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
|
||||||
* the initial value of a bpf-local-storage. If *value* is
|
* the initial value of a bpf-local-storage. If *value* is
|
||||||
* NULL, the new bpf-local-storage will be zero initialized.
|
* **NULL**, the new bpf-local-storage will be zero initialized.
|
||||||
* Return
|
* Return
|
||||||
* A bpf-local-storage pointer is returned on success.
|
* A bpf-local-storage pointer is returned on success.
|
||||||
*
|
*
|
||||||
|
@ -2662,7 +2667,7 @@ union bpf_attr {
|
||||||
*
|
*
|
||||||
* int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
|
* int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
|
||||||
* Description
|
* Description
|
||||||
* Delete a bpf-local-storage from a sk.
|
* Delete a bpf-local-storage from a *sk*.
|
||||||
* Return
|
* Return
|
||||||
* 0 on success.
|
* 0 on success.
|
||||||
*
|
*
|
||||||
|
|
|
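A rough sketch of how the two socket-storage helpers documented above fit together, assuming libbpf's bpf_helpers.h and BTF-defined map syntax (which is what the libbpf BTF sanitization later in this series caters for); the map, struct and section names are illustrative only:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct conn_stats {
	__u64 egress_pkts;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct conn_stats);
} sk_stats SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct conn_stats *st;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);	/* the key must be a full socket */
	if (!sk)
		return 1;

	/* NULL value + F_CREATE: new storage is zero initialized */
	st = bpf_sk_storage_get(&sk_stats, sk, NULL,
				BPF_SK_STORAGE_GET_F_CREATE);
	if (st)
		__sync_fetch_and_add(&st->egress_pkts, 1);

	return 1;	/* allow the packet */
}

char _license[] SEC("license") = "GPL";
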
@ -966,7 +966,6 @@ enum nft_socket_keys {
|
||||||
* @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address)
|
* @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address)
|
||||||
* @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address)
|
* @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address)
|
||||||
* @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address)
|
* @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address)
|
||||||
* @NFT_CT_TIMEOUT: connection tracking timeout policy assigned to conntrack
|
|
||||||
* @NFT_CT_ID: conntrack id
|
* @NFT_CT_ID: conntrack id
|
||||||
*/
|
*/
|
||||||
enum nft_ct_keys {
|
enum nft_ct_keys {
|
||||||
|
@ -993,7 +992,6 @@ enum nft_ct_keys {
|
||||||
NFT_CT_DST_IP,
|
NFT_CT_DST_IP,
|
||||||
NFT_CT_SRC_IP6,
|
NFT_CT_SRC_IP6,
|
||||||
NFT_CT_DST_IP6,
|
NFT_CT_DST_IP6,
|
||||||
NFT_CT_TIMEOUT,
|
|
||||||
NFT_CT_ID,
|
NFT_CT_ID,
|
||||||
__NFT_CT_MAX
|
__NFT_CT_MAX
|
||||||
};
|
};
|
||||||
|
@ -1138,7 +1136,7 @@ enum nft_log_level {
|
||||||
NFT_LOGLEVEL_AUDIT,
|
NFT_LOGLEVEL_AUDIT,
|
||||||
__NFT_LOGLEVEL_MAX
|
__NFT_LOGLEVEL_MAX
|
||||||
};
|
};
|
||||||
#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX + 1)
|
#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX - 1)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* enum nft_queue_attributes - nf_tables queue expression netlink attributes
|
* enum nft_queue_attributes - nf_tables queue expression netlink attributes
|
||||||
|
|
|
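The NFT_LOGLEVEL_MAX change above restores the usual "__FOO_MAX is one past the last valid value" convention, so the exported maximum is the sentinel minus one. A minimal stand-alone illustration, with names invented for the example:

enum ex_log_level {
	EX_LOGLEVEL_EMERG,
	EX_LOGLEVEL_ALERT,
	EX_LOGLEVEL_AUDIT,
	__EX_LOGLEVEL_MAX		/* sentinel, not a valid level */
};
#define EX_LOGLEVEL_MAX	(__EX_LOGLEVEL_MAX - 1)	/* == EX_LOGLEVEL_AUDIT */
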
@ -338,7 +338,7 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
|
||||||
}
|
}
|
||||||
|
|
||||||
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
|
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
|
||||||
s32 end_new, u32 curr, const bool probe_pass)
|
s32 end_new, s32 curr, const bool probe_pass)
|
||||||
{
|
{
|
||||||
const s64 imm_min = S32_MIN, imm_max = S32_MAX;
|
const s64 imm_min = S32_MIN, imm_max = S32_MAX;
|
||||||
s32 delta = end_new - end_old;
|
s32 delta = end_new - end_old;
|
||||||
|
@ -356,7 +356,7 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
|
||||||
}
|
}
|
||||||
|
|
||||||
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
|
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
|
||||||
s32 end_new, u32 curr, const bool probe_pass)
|
s32 end_new, s32 curr, const bool probe_pass)
|
||||||
{
|
{
|
||||||
const s32 off_min = S16_MIN, off_max = S16_MAX;
|
const s32 off_min = S16_MIN, off_max = S16_MAX;
|
||||||
s32 delta = end_new - end_old;
|
s32 delta = end_new - end_old;
|
||||||
|
|
|
@ -7599,7 +7599,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
|
||||||
insn->dst_reg,
|
insn->dst_reg,
|
||||||
shift);
|
shift);
|
||||||
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
|
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
|
||||||
(1 << size * 8) - 1);
|
(1ULL << size * 8) - 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
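The verifier change above swaps a 32-bit constant for a 64-bit one when building the narrow-load mask. A stand-alone illustration of why (plain user-space C, not kernel code): with a 4-byte load, size * 8 is 32, and shifting a 32-bit int by its full width is undefined behaviour, while the 64-bit shift is well defined.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int size = 4;

	/* uint32_t bad = (1 << size * 8) - 1;   undefined: shift >= width */
	uint64_t mask = (1ULL << size * 8) - 1;	/* 0xffffffff, well defined */

	printf("mask = %#llx\n", (unsigned long long)mask);
	return 0;
}
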
@ -602,13 +602,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
|
||||||
call_netdevice_notifiers(NETDEV_JOIN, dev);
|
call_netdevice_notifiers(NETDEV_JOIN, dev);
|
||||||
|
|
||||||
err = dev_set_allmulti(dev, 1);
|
err = dev_set_allmulti(dev, 1);
|
||||||
if (err)
|
if (err) {
|
||||||
goto put_back;
|
kfree(p); /* kobject not yet init'd, manually free */
|
||||||
|
goto err1;
|
||||||
|
}
|
||||||
|
|
||||||
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
|
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
|
||||||
SYSFS_BRIDGE_PORT_ATTR);
|
SYSFS_BRIDGE_PORT_ATTR);
|
||||||
if (err)
|
if (err)
|
||||||
goto err1;
|
goto err2;
|
||||||
|
|
||||||
err = br_sysfs_addif(p);
|
err = br_sysfs_addif(p);
|
||||||
if (err)
|
if (err)
|
||||||
|
@ -700,12 +702,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
|
||||||
sysfs_remove_link(br->ifobj, p->dev->name);
|
sysfs_remove_link(br->ifobj, p->dev->name);
|
||||||
err2:
|
err2:
|
||||||
kobject_put(&p->kobj);
|
kobject_put(&p->kobj);
|
||||||
p = NULL; /* kobject_put frees */
|
|
||||||
err1:
|
|
||||||
dev_set_allmulti(dev, -1);
|
dev_set_allmulti(dev, -1);
|
||||||
put_back:
|
err1:
|
||||||
dev_put(dev);
|
dev_put(dev);
|
||||||
kfree(p);
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -2153,7 +2153,9 @@ static int compat_copy_entries(unsigned char *data, unsigned int size_user,
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
WARN_ON(size_remaining);
|
if (size_remaining)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
return state->buf_kern_offset;
|
return state->buf_kern_offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -734,7 +734,9 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
|
||||||
flow_keys->nhoff = nhoff;
|
flow_keys->nhoff = nhoff;
|
||||||
flow_keys->thoff = flow_keys->nhoff;
|
flow_keys->thoff = flow_keys->nhoff;
|
||||||
|
|
||||||
|
preempt_disable();
|
||||||
result = BPF_PROG_RUN(prog, ctx);
|
result = BPF_PROG_RUN(prog, ctx);
|
||||||
|
preempt_enable();
|
||||||
|
|
||||||
flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
|
flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
|
||||||
flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
|
flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
|
||||||
|
|
|
@ -263,7 +263,6 @@ int dccp_disconnect(struct sock *sk, int flags)
|
||||||
struct inet_connection_sock *icsk = inet_csk(sk);
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
||||||
struct inet_sock *inet = inet_sk(sk);
|
struct inet_sock *inet = inet_sk(sk);
|
||||||
struct dccp_sock *dp = dccp_sk(sk);
|
struct dccp_sock *dp = dccp_sk(sk);
|
||||||
int err = 0;
|
|
||||||
const int old_state = sk->sk_state;
|
const int old_state = sk->sk_state;
|
||||||
|
|
||||||
if (old_state != DCCP_CLOSED)
|
if (old_state != DCCP_CLOSED)
|
||||||
|
@ -307,7 +306,7 @@ int dccp_disconnect(struct sock *sk, int flags)
|
||||||
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
|
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
|
||||||
|
|
||||||
sk->sk_error_report(sk);
|
sk->sk_error_report(sk);
|
||||||
return err;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(dccp_disconnect);
|
EXPORT_SYMBOL_GPL(dccp_disconnect);
|
||||||
|
|
|
@ -463,6 +463,8 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||||
s->tx_bytes += skb->len;
|
s->tx_bytes += skb->len;
|
||||||
u64_stats_update_end(&s->syncp);
|
u64_stats_update_end(&s->syncp);
|
||||||
|
|
||||||
|
DSA_SKB_CB(skb)->deferred_xmit = false;
|
||||||
|
|
||||||
/* Identify PTP protocol packets, clone them, and pass them to the
|
/* Identify PTP protocol packets, clone them, and pass them to the
|
||||||
* switch driver
|
* switch driver
|
||||||
*/
|
*/
|
||||||
|
|
|
@ -206,10 +206,10 @@ static const struct dsa_device_ops brcm_prepend_netdev_ops = {
|
||||||
.rcv = brcm_tag_rcv_prepend,
|
.rcv = brcm_tag_rcv_prepend,
|
||||||
.overhead = BRCM_TAG_LEN,
|
.overhead = BRCM_TAG_LEN,
|
||||||
};
|
};
|
||||||
#endif
|
|
||||||
|
|
||||||
DSA_TAG_DRIVER(brcm_prepend_netdev_ops);
|
DSA_TAG_DRIVER(brcm_prepend_netdev_ops);
|
||||||
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_PREPEND);
|
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_PREPEND);
|
||||||
|
#endif
|
||||||
|
|
||||||
static struct dsa_tag_driver *dsa_tag_driver_array[] = {
|
static struct dsa_tag_driver *dsa_tag_driver_array[] = {
|
||||||
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
|
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
|
||||||
|
|
|
@ -172,7 +172,7 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
|
||||||
if (bits % BITS_PER_BYTE > 0)
|
if (bits % BITS_PER_BYTE > 0)
|
||||||
bytes++;
|
bytes++;
|
||||||
|
|
||||||
if (*bs->cur + bytes > *bs->end)
|
if (bs->cur + bytes > bs->end)
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
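The one-character H.323 fix above is a classic bounds-check bug: the old expression compared the bytes that cur and end point at, plus an offset, instead of the positions themselves. A minimal stand-alone illustration of the corrected check (names invented for the example):

#include <stdbool.h>
#include <stddef.h>

/* cur and end delimit a parse buffer; return true if reading 'bytes'
 * more octets would run past the end.
 */
static bool would_overrun(const unsigned char *cur, const unsigned char *end,
			  size_t bytes)
{
	/* wrong: if (*cur + bytes > *end) ... compares payload values */
	return cur + bytes > end;	/* right: compares buffer positions */
}
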
@ -748,24 +748,19 @@ static int callforward_do_filter(struct net *net,
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
|
#if IS_ENABLED(CONFIG_IPV6)
|
||||||
case AF_INET6: {
|
case AF_INET6: {
|
||||||
const struct nf_ipv6_ops *v6ops;
|
|
||||||
struct rt6_info *rt1, *rt2;
|
struct rt6_info *rt1, *rt2;
|
||||||
struct flowi6 fl1, fl2;
|
struct flowi6 fl1, fl2;
|
||||||
|
|
||||||
v6ops = nf_get_ipv6_ops();
|
|
||||||
if (!v6ops)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
memset(&fl1, 0, sizeof(fl1));
|
memset(&fl1, 0, sizeof(fl1));
|
||||||
fl1.daddr = src->in6;
|
fl1.daddr = src->in6;
|
||||||
|
|
||||||
memset(&fl2, 0, sizeof(fl2));
|
memset(&fl2, 0, sizeof(fl2));
|
||||||
fl2.daddr = dst->in6;
|
fl2.daddr = dst->in6;
|
||||||
if (!v6ops->route(net, (struct dst_entry **)&rt1,
|
if (!nf_ip6_route(net, (struct dst_entry **)&rt1,
|
||||||
flowi6_to_flowi(&fl1), false)) {
|
flowi6_to_flowi(&fl1), false)) {
|
||||||
if (!v6ops->route(net, (struct dst_entry **)&rt2,
|
if (!nf_ip6_route(net, (struct dst_entry **)&rt2,
|
||||||
flowi6_to_flowi(&fl2), false)) {
|
flowi6_to_flowi(&fl2), false)) {
|
||||||
if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr),
|
if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr),
|
||||||
rt6_nexthop(rt2, &fl2.daddr)) &&
|
rt6_nexthop(rt2, &fl2.daddr)) &&
|
||||||
|
|
|
@ -1256,7 +1256,7 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
|
||||||
struct nf_conntrack_tuple tuple;
|
struct nf_conntrack_tuple tuple;
|
||||||
struct nf_conn *ct;
|
struct nf_conn *ct;
|
||||||
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
|
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
|
||||||
u_int8_t u3 = nfmsg->nfgen_family;
|
u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
|
||||||
struct nf_conntrack_zone zone;
|
struct nf_conntrack_zone zone;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
|
|
|
@ -185,14 +185,25 @@ static const struct rhashtable_params nf_flow_offload_rhash_params = {
|
||||||
|
|
||||||
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
|
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
|
||||||
{
|
{
|
||||||
flow->timeout = (u32)jiffies;
|
int err;
|
||||||
|
|
||||||
rhashtable_insert_fast(&flow_table->rhashtable,
|
err = rhashtable_insert_fast(&flow_table->rhashtable,
|
||||||
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
|
&flow->tuplehash[0].node,
|
||||||
nf_flow_offload_rhash_params);
|
nf_flow_offload_rhash_params);
|
||||||
rhashtable_insert_fast(&flow_table->rhashtable,
|
if (err < 0)
|
||||||
&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
|
return err;
|
||||||
nf_flow_offload_rhash_params);
|
|
||||||
|
err = rhashtable_insert_fast(&flow_table->rhashtable,
|
||||||
|
&flow->tuplehash[1].node,
|
||||||
|
nf_flow_offload_rhash_params);
|
||||||
|
if (err < 0) {
|
||||||
|
rhashtable_remove_fast(&flow_table->rhashtable,
|
||||||
|
&flow->tuplehash[0].node,
|
||||||
|
nf_flow_offload_rhash_params);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
flow->timeout = (u32)jiffies;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(flow_offload_add);
|
EXPORT_SYMBOL_GPL(flow_offload_add);
|
||||||
|
@ -232,6 +243,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
|
||||||
{
|
{
|
||||||
struct flow_offload_tuple_rhash *tuplehash;
|
struct flow_offload_tuple_rhash *tuplehash;
|
||||||
struct flow_offload *flow;
|
struct flow_offload *flow;
|
||||||
|
struct flow_offload_entry *e;
|
||||||
int dir;
|
int dir;
|
||||||
|
|
||||||
tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
|
tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
|
||||||
|
@ -244,6 +256,10 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
|
||||||
if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
|
if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
e = container_of(flow, struct flow_offload_entry, flow);
|
||||||
|
if (unlikely(nf_ct_is_dying(e->ct)))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
return tuplehash;
|
return tuplehash;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(flow_offload_lookup);
|
EXPORT_SYMBOL_GPL(flow_offload_lookup);
|
||||||
|
@ -290,8 +306,10 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
|
||||||
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
|
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
|
||||||
{
|
{
|
||||||
struct nf_flowtable *flow_table = data;
|
struct nf_flowtable *flow_table = data;
|
||||||
|
struct flow_offload_entry *e;
|
||||||
|
|
||||||
if (nf_flow_has_expired(flow) ||
|
e = container_of(flow, struct flow_offload_entry, flow);
|
||||||
|
if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
|
||||||
(flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
|
(flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
|
||||||
flow_offload_del(flow_table, flow);
|
flow_offload_del(flow_table, flow);
|
||||||
}
|
}
|
||||||
|
|
|
@ -181,6 +181,9 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
|
||||||
iph->protocol != IPPROTO_UDP)
|
iph->protocol != IPPROTO_UDP)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
|
if (iph->ttl <= 1)
|
||||||
|
return -1;
|
||||||
|
|
||||||
thoff = iph->ihl * 4;
|
thoff = iph->ihl * 4;
|
||||||
if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
|
if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -408,6 +411,9 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
|
||||||
ip6h->nexthdr != IPPROTO_UDP)
|
ip6h->nexthdr != IPPROTO_UDP)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
|
if (ip6h->hop_limit <= 1)
|
||||||
|
return -1;
|
||||||
|
|
||||||
thoff = sizeof(*ip6h);
|
thoff = sizeof(*ip6h);
|
||||||
if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
|
if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
|
@ -213,33 +213,33 @@ static int nft_deltable(struct nft_ctx *ctx)
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
|
static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
|
||||||
{
|
{
|
||||||
struct nft_trans *trans;
|
struct nft_trans *trans;
|
||||||
|
|
||||||
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
|
trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
|
||||||
if (trans == NULL)
|
if (trans == NULL)
|
||||||
return -ENOMEM;
|
return ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
if (msg_type == NFT_MSG_NEWCHAIN)
|
if (msg_type == NFT_MSG_NEWCHAIN)
|
||||||
nft_activate_next(ctx->net, ctx->chain);
|
nft_activate_next(ctx->net, ctx->chain);
|
||||||
|
|
||||||
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
|
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
|
||||||
return 0;
|
return trans;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int nft_delchain(struct nft_ctx *ctx)
|
static int nft_delchain(struct nft_ctx *ctx)
|
||||||
{
|
{
|
||||||
int err;
|
struct nft_trans *trans;
|
||||||
|
|
||||||
err = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN);
|
trans = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN);
|
||||||
if (err < 0)
|
if (IS_ERR(trans))
|
||||||
return err;
|
return PTR_ERR(trans);
|
||||||
|
|
||||||
ctx->table->use--;
|
ctx->table->use--;
|
||||||
nft_deactivate_next(ctx->net, ctx->chain);
|
nft_deactivate_next(ctx->net, ctx->chain);
|
||||||
|
|
||||||
return err;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void nft_rule_expr_activate(const struct nft_ctx *ctx,
|
static void nft_rule_expr_activate(const struct nft_ctx *ctx,
|
||||||
|
@ -1189,6 +1189,9 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
|
||||||
u64 pkts, bytes;
|
u64 pkts, bytes;
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
|
if (!stats)
|
||||||
|
return 0;
|
||||||
|
|
||||||
memset(&total, 0, sizeof(total));
|
memset(&total, 0, sizeof(total));
|
||||||
for_each_possible_cpu(cpu) {
|
for_each_possible_cpu(cpu) {
|
||||||
cpu_stats = per_cpu_ptr(stats, cpu);
|
cpu_stats = per_cpu_ptr(stats, cpu);
|
||||||
|
@ -1246,6 +1249,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
|
||||||
if (nft_is_base_chain(chain)) {
|
if (nft_is_base_chain(chain)) {
|
||||||
const struct nft_base_chain *basechain = nft_base_chain(chain);
|
const struct nft_base_chain *basechain = nft_base_chain(chain);
|
||||||
const struct nf_hook_ops *ops = &basechain->ops;
|
const struct nf_hook_ops *ops = &basechain->ops;
|
||||||
|
struct nft_stats __percpu *stats;
|
||||||
struct nlattr *nest;
|
struct nlattr *nest;
|
||||||
|
|
||||||
nest = nla_nest_start_noflag(skb, NFTA_CHAIN_HOOK);
|
nest = nla_nest_start_noflag(skb, NFTA_CHAIN_HOOK);
|
||||||
|
@ -1267,8 +1271,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
|
||||||
if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
|
if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
|
||||||
goto nla_put_failure;
|
goto nla_put_failure;
|
||||||
|
|
||||||
if (rcu_access_pointer(basechain->stats) &&
|
stats = rcu_dereference_check(basechain->stats,
|
||||||
nft_dump_stats(skb, rcu_dereference(basechain->stats)))
|
lockdep_commit_lock_is_held(net));
|
||||||
|
if (nft_dump_stats(skb, stats))
|
||||||
goto nla_put_failure;
|
goto nla_put_failure;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1615,6 +1620,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
|
||||||
struct nft_base_chain *basechain;
|
struct nft_base_chain *basechain;
|
||||||
struct nft_stats __percpu *stats;
|
struct nft_stats __percpu *stats;
|
||||||
struct net *net = ctx->net;
|
struct net *net = ctx->net;
|
||||||
|
struct nft_trans *trans;
|
||||||
struct nft_chain *chain;
|
struct nft_chain *chain;
|
||||||
struct nft_rule **rules;
|
struct nft_rule **rules;
|
||||||
int err;
|
int err;
|
||||||
|
@ -1662,7 +1668,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
|
||||||
ops->dev = hook.dev;
|
ops->dev = hook.dev;
|
||||||
|
|
||||||
chain->flags |= NFT_BASE_CHAIN;
|
chain->flags |= NFT_BASE_CHAIN;
|
||||||
basechain->policy = policy;
|
basechain->policy = NF_ACCEPT;
|
||||||
} else {
|
} else {
|
||||||
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
|
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
|
||||||
if (chain == NULL)
|
if (chain == NULL)
|
||||||
|
@ -1698,13 +1704,18 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
|
||||||
if (err)
|
if (err)
|
||||||
goto err2;
|
goto err2;
|
||||||
|
|
||||||
err = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
|
trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
|
||||||
if (err < 0) {
|
if (IS_ERR(trans)) {
|
||||||
|
err = PTR_ERR(trans);
|
||||||
rhltable_remove(&table->chains_ht, &chain->rhlhead,
|
rhltable_remove(&table->chains_ht, &chain->rhlhead,
|
||||||
nft_chain_ht_params);
|
nft_chain_ht_params);
|
||||||
goto err2;
|
goto err2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
nft_trans_chain_policy(trans) = -1;
|
||||||
|
if (nft_is_base_chain(chain))
|
||||||
|
nft_trans_chain_policy(trans) = policy;
|
||||||
|
|
||||||
table->use++;
|
table->use++;
|
||||||
list_add_tail_rcu(&chain->list, &table->chains);
|
list_add_tail_rcu(&chain->list, &table->chains);
|
||||||
|
|
||||||
|
@ -6310,6 +6321,27 @@ static int nf_tables_validate(struct net *net)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* a drop policy has to be deferred until all rules have been activated,
|
||||||
|
* otherwise a large ruleset that contains a drop-policy base chain will
|
||||||
|
* cause all packets to get dropped until the full transaction has been
|
||||||
|
* processed.
|
||||||
|
*
|
||||||
|
* We defer the drop policy until the transaction has been finalized.
|
||||||
|
*/
|
||||||
|
static void nft_chain_commit_drop_policy(struct nft_trans *trans)
|
||||||
|
{
|
||||||
|
struct nft_base_chain *basechain;
|
||||||
|
|
||||||
|
if (nft_trans_chain_policy(trans) != NF_DROP)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (!nft_is_base_chain(trans->ctx.chain))
|
||||||
|
return;
|
||||||
|
|
||||||
|
basechain = nft_base_chain(trans->ctx.chain);
|
||||||
|
basechain->policy = NF_DROP;
|
||||||
|
}
|
||||||
|
|
||||||
static void nft_chain_commit_update(struct nft_trans *trans)
|
static void nft_chain_commit_update(struct nft_trans *trans)
|
||||||
{
|
{
|
||||||
struct nft_base_chain *basechain;
|
struct nft_base_chain *basechain;
|
||||||
|
@ -6631,6 +6663,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
|
||||||
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
|
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
|
||||||
/* trans destroyed after rcu grace period */
|
/* trans destroyed after rcu grace period */
|
||||||
} else {
|
} else {
|
||||||
|
nft_chain_commit_drop_policy(trans);
|
||||||
nft_clear(net, trans->ctx.chain);
|
nft_clear(net, trans->ctx.chain);
|
||||||
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
|
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
|
||||||
nft_trans_destroy(trans);
|
nft_trans_destroy(trans);
|
||||||
|
|
|
@ -94,8 +94,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
|
||||||
if (help)
|
if (help)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
if (ctinfo == IP_CT_NEW ||
|
if (!nf_ct_is_confirmed(ct))
|
||||||
ctinfo == IP_CT_RELATED)
|
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
|
if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
|
||||||
|
@ -113,6 +112,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto err_flow_add;
|
goto err_flow_add;
|
||||||
|
|
||||||
|
dst_release(route.tuple[!dir].dst);
|
||||||
return;
|
return;
|
||||||
|
|
||||||
err_flow_add:
|
err_flow_add:
|
||||||
|
|
|
@ -728,12 +728,13 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
|
||||||
DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
|
DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
|
||||||
int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
|
int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
|
||||||
struct sockaddr_qrtr *, struct sockaddr_qrtr *);
|
struct sockaddr_qrtr *, struct sockaddr_qrtr *);
|
||||||
|
__le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA);
|
||||||
struct qrtr_sock *ipc = qrtr_sk(sock->sk);
|
struct qrtr_sock *ipc = qrtr_sk(sock->sk);
|
||||||
struct sock *sk = sock->sk;
|
struct sock *sk = sock->sk;
|
||||||
struct qrtr_node *node;
|
struct qrtr_node *node;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
u32 type = 0;
|
||||||
size_t plen;
|
size_t plen;
|
||||||
u32 type = QRTR_TYPE_DATA;
|
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
if (msg->msg_flags & ~(MSG_DONTWAIT))
|
if (msg->msg_flags & ~(MSG_DONTWAIT))
|
||||||
|
@ -807,8 +808,8 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* control messages already require the type as 'command' */
|
/* control messages already require the type as 'command' */
|
||||||
skb_copy_bits(skb, 0, &type, 4);
|
skb_copy_bits(skb, 0, &qrtr_type, 4);
|
||||||
type = le32_to_cpu(type);
|
type = le32_to_cpu(qrtr_type);
|
||||||
}
|
}
|
||||||
|
|
||||||
rc = enqueue_fn(node, skb, type, &ipc->us, addr);
|
rc = enqueue_fn(node, skb, type, &ipc->us, addr);
|
||||||
|
|
|
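The qrtr change above keeps the wire-order value in a dedicated __le32 variable before converting it to host order, instead of byte-swapping a plain u32 in place. A user-space analogue of the same discipline, assuming glibc's endian.h; not code from this patch:

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Read a little-endian 32-bit field out of a packet buffer. */
static uint32_t read_le32(const void *buf)
{
	uint32_t wire;			/* holds the on-wire (LE) encoding */

	memcpy(&wire, buf, sizeof(wire));
	return le32toh(wire);		/* convert once, at the boundary */
}
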
@ -1,7 +1,7 @@
|
||||||
#!/usr/bin/python3
|
#!/usr/bin/python3
|
||||||
# SPDX-License-Identifier: GPL-2.0-only
|
# SPDX-License-Identifier: GPL-2.0-only
|
||||||
#
|
#
|
||||||
# Copyright (C) 2018 Netronome Systems, Inc.
|
# Copyright (C) 2018-2019 Netronome Systems, Inc.
|
||||||
|
|
||||||
# In case user attempts to run with Python 2.
|
# In case user attempts to run with Python 2.
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
|
@ -39,7 +39,7 @@ class Helper(object):
|
||||||
Break down helper function protocol into smaller chunks: return type,
|
Break down helper function protocol into smaller chunks: return type,
|
||||||
name, distincts arguments.
|
name, distincts arguments.
|
||||||
"""
|
"""
|
||||||
arg_re = re.compile('((const )?(struct )?(\w+|...))( (\**)(\w+))?$')
|
arg_re = re.compile('((\w+ )*?(\w+|...))( (\**)(\w+))?$')
|
||||||
res = {}
|
res = {}
|
||||||
proto_re = re.compile('(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$')
|
proto_re = re.compile('(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$')
|
||||||
|
|
||||||
|
@ -54,8 +54,8 @@ class Helper(object):
|
||||||
capture = arg_re.match(a)
|
capture = arg_re.match(a)
|
||||||
res['args'].append({
|
res['args'].append({
|
||||||
'type' : capture.group(1),
|
'type' : capture.group(1),
|
||||||
'star' : capture.group(6),
|
'star' : capture.group(5),
|
||||||
'name' : capture.group(7)
|
'name' : capture.group(6)
|
||||||
})
|
})
|
||||||
|
|
||||||
return res
|
return res
|
||||||
|
|
|
@ -4649,7 +4649,7 @@ static int selinux_socket_connect_helper(struct socket *sock,
|
||||||
struct lsm_network_audit net = {0,};
|
struct lsm_network_audit net = {0,};
|
||||||
struct sockaddr_in *addr4 = NULL;
|
struct sockaddr_in *addr4 = NULL;
|
||||||
struct sockaddr_in6 *addr6 = NULL;
|
struct sockaddr_in6 *addr6 = NULL;
|
||||||
unsigned short snum = 0;
|
unsigned short snum;
|
||||||
u32 sid, perm;
|
u32 sid, perm;
|
||||||
|
|
||||||
/* sctp_connectx(3) calls via selinux_sctp_bind_connect()
|
/* sctp_connectx(3) calls via selinux_sctp_bind_connect()
|
||||||
|
@ -4674,12 +4674,12 @@ static int selinux_socket_connect_helper(struct socket *sock,
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
/* Note that SCTP services expect -EINVAL, whereas
|
/* Note that SCTP services expect -EINVAL, whereas
|
||||||
* others must handle this at the protocol level:
|
* others expect -EAFNOSUPPORT.
|
||||||
* connect(AF_UNSPEC) on a connected socket is
|
|
||||||
* a documented way disconnect the socket.
|
|
||||||
*/
|
*/
|
||||||
if (sksec->sclass == SECCLASS_SCTP_SOCKET)
|
if (sksec->sclass == SECCLASS_SCTP_SOCKET)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
else
|
||||||
|
return -EAFNOSUPPORT;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = sel_netport_sid(sk->sk_protocol, snum, &sid);
|
err = sel_netport_sid(sk->sk_protocol, snum, &sid);
|
||||||
|
|
|
@ -629,7 +629,7 @@ union bpf_attr {
|
||||||
* **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
|
* **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
|
||||||
* **->swhash** and *skb*\ **->l4hash** to 0).
|
* **->swhash** and *skb*\ **->l4hash** to 0).
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -654,7 +654,7 @@ union bpf_attr {
|
||||||
* flexibility and can handle sizes larger than 2 or 4 for the
|
* flexibility and can handle sizes larger than 2 or 4 for the
|
||||||
* checksum to update.
|
* checksum to update.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -686,7 +686,7 @@ union bpf_attr {
|
||||||
* flexibility and can handle sizes larger than 2 or 4 for the
|
* flexibility and can handle sizes larger than 2 or 4 for the
|
||||||
* checksum to update.
|
* checksum to update.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -741,7 +741,7 @@ union bpf_attr {
|
||||||
* efficient, but it is handled through an action code where the
|
* efficient, but it is handled through an action code where the
|
||||||
* redirection happens only after the eBPF program has returned.
|
* redirection happens only after the eBPF program has returned.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -806,7 +806,7 @@ union bpf_attr {
|
||||||
* **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
|
* **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
|
||||||
* be **ETH_P_8021Q**.
|
* be **ETH_P_8021Q**.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -818,7 +818,7 @@ union bpf_attr {
|
||||||
* Description
|
* Description
|
||||||
* Pop a VLAN header from the packet associated to *skb*.
|
* Pop a VLAN header from the packet associated to *skb*.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -1168,7 +1168,7 @@ union bpf_attr {
|
||||||
* All values for *flags* are reserved for future usage, and must
|
* All values for *flags* are reserved for future usage, and must
|
||||||
* be left at zero.
|
* be left at zero.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -1281,7 +1281,7 @@ union bpf_attr {
|
||||||
* implicitly linearizes, unclones and drops offloads from the
|
* implicitly linearizes, unclones and drops offloads from the
|
||||||
* *skb*.
|
* *skb*.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -1317,7 +1317,7 @@ union bpf_attr {
|
||||||
* **bpf_skb_pull_data()** to effectively unclone the *skb* from
|
* **bpf_skb_pull_data()** to effectively unclone the *skb* from
|
||||||
* the very beginning in case it is indeed cloned.
|
* the very beginning in case it is indeed cloned.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -1369,7 +1369,7 @@ union bpf_attr {
|
||||||
* All values for *flags* are reserved for future usage, and must
|
* All values for *flags* are reserved for future usage, and must
|
||||||
* be left at zero.
|
* be left at zero.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -1384,7 +1384,7 @@ union bpf_attr {
|
||||||
* can be used to prepare the packet for pushing or popping
|
* can be used to prepare the packet for pushing or popping
|
||||||
* headers.
|
* headers.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -1518,20 +1518,20 @@ union bpf_attr {
|
||||||
* * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
|
* * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
|
||||||
* Adjusting mss in this way is not allowed for datagrams.
|
* Adjusting mss in this way is not allowed for datagrams.
|
||||||
*
|
*
|
||||||
* * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 **:
|
* * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
|
||||||
* * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 **:
|
* **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
|
||||||
* Any new space is reserved to hold a tunnel header.
|
* Any new space is reserved to hold a tunnel header.
|
||||||
* Configure skb offsets and other fields accordingly.
|
* Configure skb offsets and other fields accordingly.
|
||||||
*
|
*
|
||||||
* * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE **:
|
* * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
|
||||||
* * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **:
|
* **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
|
||||||
* Use with ENCAP_L3 flags to further specify the tunnel type.
|
* Use with ENCAP_L3 flags to further specify the tunnel type.
|
||||||
*
|
*
|
||||||
* * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **:
|
* * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
|
||||||
* Use with ENCAP_L3/L4 flags to further specify the tunnel
|
* Use with ENCAP_L3/L4 flags to further specify the tunnel
|
||||||
* type; **len** is the length of the inner MAC header.
|
* type; *len* is the length of the inner MAC header.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
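A rough sketch of how the BPF_F_ADJ_ROOM_ENCAP_* flags documented in the hunk above combine in practice, assuming libbpf's bpf_helpers.h; the section name and the 4-byte basic GRE sizing are illustrative assumptions, not taken from this patch:

#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int reserve_gre_room(struct __sk_buff *skb)
{
	__u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO |
		      BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
		      BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
	__s32 grow = sizeof(struct iphdr) + 4;	/* outer IPv4 + basic GRE */

	/* Grow headroom at the MAC layer and tell the stack the new space
	 * is an IPv4/GRE tunnel header.
	 */
	if (bpf_skb_adjust_room(skb, grow, BPF_ADJ_ROOM_MAC, flags))
		return TC_ACT_SHOT;

	/* The outer headers themselves would be written next, e.g. with
	 * bpf_skb_store_bytes().
	 */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
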
@ -1610,7 +1610,7 @@ union bpf_attr {
|
||||||
* more flexibility as the user is free to store whatever meta
|
* more flexibility as the user is free to store whatever meta
|
||||||
* data they need.
|
* data they need.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -1852,7 +1852,7 @@ union bpf_attr {
|
||||||
* copied if necessary (i.e. if data was not linear and if start
|
* copied if necessary (i.e. if data was not linear and if start
|
||||||
* and end pointers do not point to the same chunk).
|
* and end pointers do not point to the same chunk).
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -1886,7 +1886,7 @@ union bpf_attr {
|
||||||
* only possible to shrink the packet as of this writing,
|
* only possible to shrink the packet as of this writing,
|
||||||
* therefore *delta* must be a negative integer.
|
* therefore *delta* must be a negative integer.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -2061,18 +2061,18 @@ union bpf_attr {
|
||||||
* **BPF_LWT_ENCAP_IP**
|
* **BPF_LWT_ENCAP_IP**
|
||||||
* IP encapsulation (GRE/GUE/IPIP/etc). The outer header
|
* IP encapsulation (GRE/GUE/IPIP/etc). The outer header
|
||||||
* must be IPv4 or IPv6, followed by zero or more
|
* must be IPv4 or IPv6, followed by zero or more
|
||||||
* additional headers, up to LWT_BPF_MAX_HEADROOM total
|
* additional headers, up to **LWT_BPF_MAX_HEADROOM**
|
||||||
* bytes in all prepended headers. Please note that
|
* total bytes in all prepended headers. Please note that
|
||||||
* if skb_is_gso(skb) is true, no more than two headers
|
* if **skb_is_gso**\ (*skb*) is true, no more than two
|
||||||
* can be prepended, and the inner header, if present,
|
* headers can be prepended, and the inner header, if
|
||||||
* should be either GRE or UDP/GUE.
|
* present, should be either GRE or UDP/GUE.
|
||||||
*
|
*
|
||||||
* BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of
|
* **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
|
||||||
* type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called
|
* of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
|
||||||
* by bpf programs of types BPF_PROG_TYPE_LWT_IN and
|
* be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
|
||||||
* BPF_PROG_TYPE_LWT_XMIT.
|
* **BPF_PROG_TYPE_LWT_XMIT**.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -2087,7 +2087,7 @@ union bpf_attr {
|
||||||
* inside the outermost IPv6 Segment Routing Header can be
|
* inside the outermost IPv6 Segment Routing Header can be
|
||||||
* modified through this helper.
|
* modified through this helper.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -2103,7 +2103,7 @@ union bpf_attr {
|
||||||
* after the segments are accepted. *delta* can be as well
|
* after the segments are accepted. *delta* can be as well
|
||||||
* positive (growing) as negative (shrinking).
|
* positive (growing) as negative (shrinking).
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -2126,13 +2126,13 @@ union bpf_attr {
|
||||||
* Type of *param*: **int**.
|
* Type of *param*: **int**.
|
||||||
* **SEG6_LOCAL_ACTION_END_B6**
|
* **SEG6_LOCAL_ACTION_END_B6**
|
||||||
* End.B6 action: Endpoint bound to an SRv6 policy.
|
* End.B6 action: Endpoint bound to an SRv6 policy.
|
||||||
* Type of param: **struct ipv6_sr_hdr**.
|
* Type of *param*: **struct ipv6_sr_hdr**.
|
||||||
* **SEG6_LOCAL_ACTION_END_B6_ENCAP**
|
* **SEG6_LOCAL_ACTION_END_B6_ENCAP**
|
||||||
* End.B6.Encap action: Endpoint bound to an SRv6
|
* End.B6.Encap action: Endpoint bound to an SRv6
|
||||||
* encapsulation policy.
|
* encapsulation policy.
|
||||||
* Type of param: **struct ipv6_sr_hdr**.
|
* Type of *param*: **struct ipv6_sr_hdr**.
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlaying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
* previously done by the verifier are invalidated and must be
|
* previously done by the verifier are invalidated and must be
|
||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
|
@ -2285,7 +2285,8 @@ union bpf_attr {
|
||||||
* Return
|
* Return
|
||||||
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
|
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
|
||||||
* For sockets with reuseport option, the **struct bpf_sock**
|
* For sockets with reuseport option, the **struct bpf_sock**
|
||||||
* result is from **reuse->socks**\ [] using the hash of the tuple.
|
* result is from *reuse*\ **->socks**\ [] using the hash of the
|
||||||
|
* tuple.
|
||||||
*
|
*
|
||||||
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
|
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
|
||||||
* Description
|
* Description
|
||||||
|
@ -2321,7 +2322,8 @@ union bpf_attr {
|
||||||
* Return
|
* Return
|
||||||
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
|
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
|
||||||
* For sockets with reuseport option, the **struct bpf_sock**
|
* For sockets with reuseport option, the **struct bpf_sock**
|
||||||
* result is from **reuse->socks**\ [] using the hash of the tuple.
|
* result is from *reuse*\ **->socks**\ [] using the hash of the
|
||||||
|
* tuple.
|
||||||
*
|
*
|
||||||
* int bpf_sk_release(struct bpf_sock *sock)
|
* int bpf_sk_release(struct bpf_sock *sock)
|
||||||
* Description
|
* Description
|
||||||
|
@ -2490,31 +2492,34 @@ union bpf_attr {
|
||||||
* network namespace *netns*. The return value must be checked,
|
* network namespace *netns*. The return value must be checked,
|
||||||
* and if non-**NULL**, released via **bpf_sk_release**\ ().
|
* and if non-**NULL**, released via **bpf_sk_release**\ ().
|
||||||
*
|
*
|
||||||
* This function is identical to bpf_sk_lookup_tcp, except that it
|
* This function is identical to **bpf_sk_lookup_tcp**\ (), except
|
||||||
* also returns timewait or request sockets. Use bpf_sk_fullsock
|
* that it also returns timewait or request sockets. Use
|
||||||
* or bpf_tcp_socket to access the full structure.
|
* **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
|
||||||
|
* full structure.
|
||||||
*
|
*
|
||||||
* This helper is available only if the kernel was compiled with
|
* This helper is available only if the kernel was compiled with
|
||||||
* **CONFIG_NET** configuration option.
|
* **CONFIG_NET** configuration option.
|
||||||
* Return
|
* Return
|
||||||
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
|
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
|
||||||
* For sockets with reuseport option, the **struct bpf_sock**
|
* For sockets with reuseport option, the **struct bpf_sock**
|
||||||
* result is from **reuse->socks**\ [] using the hash of the tuple.
|
* result is from *reuse*\ **->socks**\ [] using the hash of the
|
||||||
|
* tuple.
|
||||||
*
|
*
|
||||||
* int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
|
* int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
|
||||||
* Description
|
* Description
|
||||||
* Check whether iph and th contain a valid SYN cookie ACK for
|
* Check whether *iph* and *th* contain a valid SYN cookie ACK for
|
||||||
* the listening socket in sk.
|
* the listening socket in *sk*.
|
||||||
*
|
*
|
||||||
* iph points to the start of the IPv4 or IPv6 header, while
|
* *iph* points to the start of the IPv4 or IPv6 header, while
|
||||||
* iph_len contains sizeof(struct iphdr) or sizeof(struct ip6hdr).
|
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
|
||||||
|
* **sizeof**\ (**struct ip6hdr**).
|
||||||
*
|
*
|
||||||
* th points to the start of the TCP header, while th_len contains
|
* *th* points to the start of the TCP header, while *th_len*
|
||||||
* sizeof(struct tcphdr).
|
* contains **sizeof**\ (**struct tcphdr**).
|
||||||
*
|
*
|
||||||
* Return
|
* Return
|
||||||
* 0 if iph and th are a valid SYN cookie ACK, or a negative error
|
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
|
||||||
* otherwise.
|
* error otherwise.
|
||||||
*
|
*
|
||||||
* int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
|
* int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
|
||||||
* Description
|
* Description
|
||||||
|
@ -2592,17 +2597,17 @@ union bpf_attr {
|
||||||
* and save the result in *res*.
|
* and save the result in *res*.
|
||||||
*
|
*
|
||||||
* The string may begin with an arbitrary amount of white space
|
* The string may begin with an arbitrary amount of white space
|
||||||
* (as determined by isspace(3)) followed by a single optional '-'
|
* (as determined by **isspace**\ (3)) followed by a single
|
||||||
* sign.
|
* optional '**-**' sign.
|
||||||
*
|
*
|
||||||
* Five least significant bits of *flags* encode base, other bits
|
* Five least significant bits of *flags* encode base, other bits
|
||||||
* are currently unused.
|
* are currently unused.
|
||||||
*
|
*
|
||||||
* Base must be either 8, 10, 16 or 0 to detect it automatically
|
* Base must be either 8, 10, 16 or 0 to detect it automatically
|
||||||
* similar to user space strtol(3).
|
* similar to user space **strtol**\ (3).
|
||||||
* Return
|
* Return
|
||||||
* Number of characters consumed on success. Must be positive but
|
* Number of characters consumed on success. Must be positive but
|
||||||
* no more than buf_len.
|
* no more than *buf_len*.
|
||||||
*
|
*
|
||||||
* **-EINVAL** if no valid digits were found or unsupported base
|
* **-EINVAL** if no valid digits were found or unsupported base
|
||||||
* was provided.
|
* was provided.
|
||||||
|
@ -2616,16 +2621,16 @@ union bpf_attr {
|
||||||
* given base and save the result in *res*.
|
* given base and save the result in *res*.
|
||||||
*
|
*
|
||||||
* The string may begin with an arbitrary amount of white space
|
* The string may begin with an arbitrary amount of white space
|
||||||
* (as determined by isspace(3)).
|
* (as determined by **isspace**\ (3)).
|
||||||
*
|
*
|
||||||
* Five least significant bits of *flags* encode base, other bits
|
* Five least significant bits of *flags* encode base, other bits
|
||||||
* are currently unused.
|
* are currently unused.
|
||||||
*
|
*
|
||||||
* Base must be either 8, 10, 16 or 0 to detect it automatically
|
* Base must be either 8, 10, 16 or 0 to detect it automatically
|
||||||
* similar to user space strtoul(3).
|
* similar to user space **strtoul**\ (3).
|
||||||
* Return
|
* Return
|
||||||
* Number of characters consumed on success. Must be positive but
|
* Number of characters consumed on success. Must be positive but
|
||||||
* no more than buf_len.
|
* no more than *buf_len*.
|
||||||
*
|
*
|
||||||
* **-EINVAL** if no valid digits were found or unsupported base
|
* **-EINVAL** if no valid digits were found or unsupported base
|
||||||
* was provided.
|
* was provided.
|
||||||
|
@ -2634,26 +2639,26 @@ union bpf_attr {
|
||||||
*
|
*
|
||||||
* void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
|
* void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
|
||||||
* Description
|
* Description
|
||||||
* Get a bpf-local-storage from a sk.
|
* Get a bpf-local-storage from a *sk*.
|
||||||
*
|
*
|
||||||
* Logically, it could be thought of getting the value from
|
* Logically, it could be thought of getting the value from
|
||||||
* a *map* with *sk* as the **key**. From this
|
* a *map* with *sk* as the **key**. From this
|
||||||
* perspective, the usage is not much different from
|
* perspective, the usage is not much different from
|
||||||
* **bpf_map_lookup_elem(map, &sk)** except this
|
* **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
|
||||||
* helper enforces the key must be a **bpf_fullsock()**
|
* helper enforces the key must be a full socket and the map must
|
||||||
* and the map must be a BPF_MAP_TYPE_SK_STORAGE also.
|
* be a **BPF_MAP_TYPE_SK_STORAGE** also.
|
||||||
*
|
*
|
||||||
* Underneath, the value is stored locally at *sk* instead of
|
* Underneath, the value is stored locally at *sk* instead of
|
||||||
* the map. The *map* is used as the bpf-local-storage **type**.
|
* the *map*. The *map* is used as the bpf-local-storage
|
||||||
* The bpf-local-storage **type** (i.e. the *map*) is searched
|
* "type". The bpf-local-storage "type" (i.e. the *map*) is
|
||||||
* against all bpf-local-storages residing at sk.
|
* searched against all bpf-local-storages residing at *sk*.
|
||||||
*
|
*
|
||||||
* An optional *flags* (BPF_SK_STORAGE_GET_F_CREATE) can be
|
* An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
|
||||||
* used such that a new bpf-local-storage will be
|
* used such that a new bpf-local-storage will be
|
||||||
* created if one does not exist. *value* can be used
|
* created if one does not exist. *value* can be used
|
||||||
* together with BPF_SK_STORAGE_GET_F_CREATE to specify
|
* together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
|
||||||
* the initial value of a bpf-local-storage. If *value* is
|
* the initial value of a bpf-local-storage. If *value* is
|
||||||
* NULL, the new bpf-local-storage will be zero initialized.
|
* **NULL**, the new bpf-local-storage will be zero initialized.
|
||||||
* Return
|
* Return
|
||||||
* A bpf-local-storage pointer is returned on success.
|
* A bpf-local-storage pointer is returned on success.
|
||||||
*
|
*
|
||||||
|
@ -2662,7 +2667,7 @@ union bpf_attr {
|
||||||
*
|
*
|
||||||
* int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
|
* int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
|
||||||
* Description
|
* Description
|
||||||
* Delete a bpf-local-storage from a sk.
|
* Delete a bpf-local-storage from a *sk*.
|
||||||
* Return
|
* Return
|
||||||
* 0 on success.
|
* 0 on success.
|
||||||
*
|
*
|
||||||
|
|
|
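The sk_storage helpers documented above can be sketched as follows (illustrative only, not from the patch); "sk_storage" stands for a map of type BPF_MAP_TYPE_SK_STORAGE created with BPF_F_NO_PREALLOC, struct pkt_count is a made-up value type, and sk is the full socket pointer available to the program:

	struct pkt_count {
		__u64 packets;
	};

	struct pkt_count *cnt;

	/* create the per-socket storage on first use */
	cnt = bpf_sk_storage_get(&sk_storage, sk, NULL,
				 BPF_SK_STORAGE_GET_F_CREATE);
	if (!cnt)
		return 0;	/* not a full socket, or out of memory */
	cnt->packets++;		/* the value lives with the socket, not the map */

	/* and to drop the storage again */
	bpf_sk_storage_delete(&sk_storage, sk);
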
@ -44,6 +44,7 @@
|
||||||
#include "btf.h"
|
#include "btf.h"
|
||||||
#include "str_error.h"
|
#include "str_error.h"
|
||||||
#include "libbpf_util.h"
|
#include "libbpf_util.h"
|
||||||
|
#include "libbpf_internal.h"
|
||||||
|
|
||||||
#ifndef EM_BPF
|
#ifndef EM_BPF
|
||||||
#define EM_BPF 247
|
#define EM_BPF 247
|
||||||
|
@ -128,6 +129,10 @@ struct bpf_capabilities {
|
||||||
__u32 name:1;
|
__u32 name:1;
|
||||||
/* v5.2: kernel support for global data sections. */
|
/* v5.2: kernel support for global data sections. */
|
||||||
__u32 global_data:1;
|
__u32 global_data:1;
|
||||||
|
/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
|
||||||
|
__u32 btf_func:1;
|
||||||
|
/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
|
||||||
|
__u32 btf_datasec:1;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1021,6 +1026,74 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void bpf_object__sanitize_btf(struct bpf_object *obj)
|
||||||
|
{
|
||||||
|
bool has_datasec = obj->caps.btf_datasec;
|
||||||
|
bool has_func = obj->caps.btf_func;
|
||||||
|
struct btf *btf = obj->btf;
|
||||||
|
struct btf_type *t;
|
||||||
|
int i, j, vlen;
|
||||||
|
__u16 kind;
|
||||||
|
|
||||||
|
if (!obj->btf || (has_func && has_datasec))
|
||||||
|
return;
|
||||||
|
|
||||||
|
for (i = 1; i <= btf__get_nr_types(btf); i++) {
|
||||||
|
t = (struct btf_type *)btf__type_by_id(btf, i);
|
||||||
|
kind = BTF_INFO_KIND(t->info);
|
||||||
|
|
||||||
|
if (!has_datasec && kind == BTF_KIND_VAR) {
|
||||||
|
/* replace VAR with INT */
|
||||||
|
t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
|
||||||
|
t->size = sizeof(int);
|
||||||
|
*(int *)(t+1) = BTF_INT_ENC(0, 0, 32);
|
||||||
|
} else if (!has_datasec && kind == BTF_KIND_DATASEC) {
|
||||||
|
/* replace DATASEC with STRUCT */
|
||||||
|
struct btf_var_secinfo *v = (void *)(t + 1);
|
||||||
|
struct btf_member *m = (void *)(t + 1);
|
||||||
|
struct btf_type *vt;
|
||||||
|
char *name;
|
||||||
|
|
||||||
|
name = (char *)btf__name_by_offset(btf, t->name_off);
|
||||||
|
while (*name) {
|
||||||
|
if (*name == '.')
|
||||||
|
*name = '_';
|
||||||
|
name++;
|
||||||
|
}
|
||||||
|
|
||||||
|
vlen = BTF_INFO_VLEN(t->info);
|
||||||
|
t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
|
||||||
|
for (j = 0; j < vlen; j++, v++, m++) {
|
||||||
|
/* order of field assignments is important */
|
||||||
|
m->offset = v->offset * 8;
|
||||||
|
m->type = v->type;
|
||||||
|
/* preserve variable name as member name */
|
||||||
|
vt = (void *)btf__type_by_id(btf, v->type);
|
||||||
|
m->name_off = vt->name_off;
|
||||||
|
}
|
||||||
|
} else if (!has_func && kind == BTF_KIND_FUNC_PROTO) {
|
||||||
|
/* replace FUNC_PROTO with ENUM */
|
||||||
|
vlen = BTF_INFO_VLEN(t->info);
|
||||||
|
t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
|
||||||
|
t->size = sizeof(__u32); /* kernel enforced */
|
||||||
|
} else if (!has_func && kind == BTF_KIND_FUNC) {
|
||||||
|
/* replace FUNC with TYPEDEF */
|
||||||
|
t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
|
||||||
|
{
|
||||||
|
if (!obj->btf_ext)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (!obj->caps.btf_func) {
|
||||||
|
btf_ext__free(obj->btf_ext);
|
||||||
|
obj->btf_ext = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
|
static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
|
||||||
{
|
{
|
||||||
Elf *elf = obj->efile.elf;
|
Elf *elf = obj->efile.elf;
|
||||||
|
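For reference, the info word that bpf_object__sanitize_btf() rewrites packs the kind, kind_flag and vlen fields as defined in include/uapi/linux/btf.h; a small self-contained check of that layout (a sketch, not part of the patch):

	#include <assert.h>
	#include <linux/btf.h>
	#include <linux/types.h>

	/* same definition as in the new libbpf_internal.h further down */
	#define BTF_INFO_ENC(kind, kind_flag, vlen) \
		((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))

	int main(void)
	{
		/* a DATASEC with 3 variables becomes a STRUCT with 3 members */
		__u32 info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 3);

		assert(BTF_INFO_KIND(info) == BTF_KIND_STRUCT);	/* bits 24-27 */
		assert(BTF_INFO_VLEN(info) == 3);		/* bits  0-15 */
		return 0;
	}
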
@ -1164,8 +1237,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
|
||||||
obj->btf = NULL;
|
obj->btf = NULL;
|
||||||
} else {
|
} else {
|
||||||
err = btf__finalize_data(obj, obj->btf);
|
err = btf__finalize_data(obj, obj->btf);
|
||||||
if (!err)
|
if (!err) {
|
||||||
|
bpf_object__sanitize_btf(obj);
|
||||||
err = btf__load(obj->btf);
|
err = btf__load(obj->btf);
|
||||||
|
}
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n",
|
pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n",
|
||||||
BTF_ELF_SEC, err);
|
BTF_ELF_SEC, err);
|
||||||
|
@ -1187,6 +1262,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
|
||||||
BTF_EXT_ELF_SEC,
|
BTF_EXT_ELF_SEC,
|
||||||
PTR_ERR(obj->btf_ext));
|
PTR_ERR(obj->btf_ext));
|
||||||
obj->btf_ext = NULL;
|
obj->btf_ext = NULL;
|
||||||
|
} else {
|
||||||
|
bpf_object__sanitize_btf_ext(obj);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1556,12 +1633,63 @@ bpf_object__probe_global_data(struct bpf_object *obj)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int bpf_object__probe_btf_func(struct bpf_object *obj)
|
||||||
|
{
|
||||||
|
const char strs[] = "\0int\0x\0a";
|
||||||
|
/* void x(int a) {} */
|
||||||
|
__u32 types[] = {
|
||||||
|
/* int */
|
||||||
|
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||||
|
/* FUNC_PROTO */ /* [2] */
|
||||||
|
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
|
||||||
|
BTF_PARAM_ENC(7, 1),
|
||||||
|
/* FUNC x */ /* [3] */
|
||||||
|
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
|
||||||
|
};
|
||||||
|
int res;
|
||||||
|
|
||||||
|
res = libbpf__probe_raw_btf((char *)types, sizeof(types),
|
||||||
|
strs, sizeof(strs));
|
||||||
|
if (res < 0)
|
||||||
|
return res;
|
||||||
|
if (res > 0)
|
||||||
|
obj->caps.btf_func = 1;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
|
||||||
|
{
|
||||||
|
const char strs[] = "\0x\0.data";
|
||||||
|
/* static int a; */
|
||||||
|
__u32 types[] = {
|
||||||
|
/* int */
|
||||||
|
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||||
|
/* VAR x */ /* [2] */
|
||||||
|
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
|
||||||
|
BTF_VAR_STATIC,
|
||||||
|
/* DATASEC val */ /* [3] */
|
||||||
|
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
|
||||||
|
BTF_VAR_SECINFO_ENC(2, 0, 4),
|
||||||
|
};
|
||||||
|
int res;
|
||||||
|
|
||||||
|
res = libbpf__probe_raw_btf((char *)types, sizeof(types),
|
||||||
|
strs, sizeof(strs));
|
||||||
|
if (res < 0)
|
||||||
|
return res;
|
||||||
|
if (res > 0)
|
||||||
|
obj->caps.btf_datasec = 1;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
bpf_object__probe_caps(struct bpf_object *obj)
|
bpf_object__probe_caps(struct bpf_object *obj)
|
||||||
{
|
{
|
||||||
int (*probe_fn[])(struct bpf_object *obj) = {
|
int (*probe_fn[])(struct bpf_object *obj) = {
|
||||||
bpf_object__probe_name,
|
bpf_object__probe_name,
|
||||||
bpf_object__probe_global_data,
|
bpf_object__probe_global_data,
|
||||||
|
bpf_object__probe_btf_func,
|
||||||
|
bpf_object__probe_btf_datasec,
|
||||||
};
|
};
|
||||||
int i, ret;
|
int i, ret;
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,27 @@
|
||||||
|
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Internal libbpf helpers.
|
||||||
|
*
|
||||||
|
* Copyright (c) 2019 Facebook
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef __LIBBPF_LIBBPF_INTERNAL_H
|
||||||
|
#define __LIBBPF_LIBBPF_INTERNAL_H
|
||||||
|
|
||||||
|
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
|
||||||
|
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
|
||||||
|
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
|
||||||
|
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
|
||||||
|
((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
|
||||||
|
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
|
||||||
|
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
|
||||||
|
BTF_INT_ENC(encoding, bits_offset, bits)
|
||||||
|
#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
|
||||||
|
#define BTF_PARAM_ENC(name, type) (name), (type)
|
||||||
|
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
|
||||||
|
|
||||||
|
int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
|
||||||
|
const char *str_sec, size_t str_len);
|
||||||
|
|
||||||
|
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
|
|
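To make the encoding macros above concrete, this is what the int type from the bpf_object__probe_btf_func() probe ("void x(int a)") expands to, word by word; a sketch that copies the macros from the header above so it compiles on its own:

	#include <assert.h>
	#include <linux/btf.h>
	#include <linux/types.h>

	#define BTF_INFO_ENC(kind, kind_flag, vlen) \
		((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
	#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
	#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
		((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
	#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
		BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
		BTF_INT_ENC(encoding, bits_offset, bits)

	int main(void)
	{
		__u32 types[] = {
			BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
		};

		assert(types[0] == 1);			/* name_off 1 -> "int" in "\0int\0x\0a" */
		assert(types[1] == 0x01000000);		/* kind BTF_KIND_INT, vlen 0 */
		assert(types[2] == 4);			/* size in bytes */
		assert(types[3] == 0x01000020);		/* signed, bit offset 0, 32 bits */
		return 0;
	}
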
@ -15,6 +15,7 @@
|
||||||
|
|
||||||
#include "bpf.h"
|
#include "bpf.h"
|
||||||
#include "libbpf.h"
|
#include "libbpf.h"
|
||||||
|
#include "libbpf_internal.h"
|
||||||
|
|
||||||
static bool grep(const char *buffer, const char *pattern)
|
static bool grep(const char *buffer, const char *pattern)
|
||||||
{
|
{
|
||||||
|
@ -132,21 +133,43 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
|
||||||
return errno != EINVAL && errno != EOPNOTSUPP;
|
return errno != EINVAL && errno != EOPNOTSUPP;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int load_btf(void)
|
int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
|
||||||
|
const char *str_sec, size_t str_len)
|
||||||
{
|
{
|
||||||
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
|
struct btf_header hdr = {
|
||||||
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
|
.magic = BTF_MAGIC,
|
||||||
#define BTF_TYPE_ENC(name, info, size_or_type) \
|
.version = BTF_VERSION,
|
||||||
(name), (info), (size_or_type)
|
.hdr_len = sizeof(struct btf_header),
|
||||||
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
|
.type_len = types_len,
|
||||||
((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
|
.str_off = types_len,
|
||||||
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
|
.str_len = str_len,
|
||||||
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
|
};
|
||||||
BTF_INT_ENC(encoding, bits_offset, bits)
|
int btf_fd, btf_len;
|
||||||
#define BTF_MEMBER_ENC(name, type, bits_offset) \
|
__u8 *raw_btf;
|
||||||
(name), (type), (bits_offset)
|
|
||||||
|
|
||||||
const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
|
btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
|
||||||
|
raw_btf = malloc(btf_len);
|
||||||
|
if (!raw_btf)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
memcpy(raw_btf, &hdr, sizeof(hdr));
|
||||||
|
memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
|
||||||
|
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
|
||||||
|
|
||||||
|
btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);
|
||||||
|
if (btf_fd < 0) {
|
||||||
|
free(raw_btf);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
close(btf_fd);
|
||||||
|
free(raw_btf);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int load_sk_storage_btf(void)
|
||||||
|
{
|
||||||
|
const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
|
||||||
/* struct bpf_spin_lock {
|
/* struct bpf_spin_lock {
|
||||||
* int val;
|
* int val;
|
||||||
* };
|
* };
|
||||||
|
@ -155,7 +178,7 @@ static int load_btf(void)
|
||||||
* struct bpf_spin_lock l;
|
* struct bpf_spin_lock l;
|
||||||
* };
|
* };
|
||||||
*/
|
*/
|
||||||
__u32 btf_raw_types[] = {
|
__u32 types[] = {
|
||||||
/* int */
|
/* int */
|
||||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||||
/* struct bpf_spin_lock */ /* [2] */
|
/* struct bpf_spin_lock */ /* [2] */
|
||||||
|
@ -166,23 +189,9 @@ static int load_btf(void)
|
||||||
BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
|
BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
|
||||||
BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
|
BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
|
||||||
};
|
};
|
||||||
struct btf_header btf_hdr = {
|
|
||||||
.magic = BTF_MAGIC,
|
|
||||||
.version = BTF_VERSION,
|
|
||||||
.hdr_len = sizeof(struct btf_header),
|
|
||||||
.type_len = sizeof(btf_raw_types),
|
|
||||||
.str_off = sizeof(btf_raw_types),
|
|
||||||
.str_len = sizeof(btf_str_sec),
|
|
||||||
};
|
|
||||||
__u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
|
|
||||||
sizeof(btf_str_sec)];
|
|
||||||
|
|
||||||
memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
|
return libbpf__probe_raw_btf((char *)types, sizeof(types),
|
||||||
memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
|
strs, sizeof(strs));
|
||||||
memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
|
|
||||||
btf_str_sec, sizeof(btf_str_sec));
|
|
||||||
|
|
||||||
return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
||||||
|
@ -222,7 +231,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
||||||
value_size = 8;
|
value_size = 8;
|
||||||
max_entries = 0;
|
max_entries = 0;
|
||||||
map_flags = BPF_F_NO_PREALLOC;
|
map_flags = BPF_F_NO_PREALLOC;
|
||||||
btf_fd = load_btf();
|
btf_fd = load_sk_storage_btf();
|
||||||
if (btf_fd < 0)
|
if (btf_fd < 0)
|
||||||
return false;
|
return false;
|
||||||
break;
|
break;
|
||||||
|
|
|
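From the user's side, the probing above sits behind the existing public API; a minimal sketch of checking whether the running kernel supports sk_storage maps (which now exercises libbpf__probe_raw_btf() internally), assuming libbpf and recent uapi headers are installed:

	#include <stdio.h>
	#include <linux/bpf.h>
	#include <bpf/libbpf.h>

	int main(void)
	{
		/* second argument is the offload ifindex; 0 probes the host */
		if (bpf_probe_map_type(BPF_MAP_TYPE_SK_STORAGE, 0))
			printf("BPF_MAP_TYPE_SK_STORAGE is supported\n");
		else
			printf("BPF_MAP_TYPE_SK_STORAGE is not supported\n");
		return 0;
	}
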
@ -32,3 +32,5 @@ test_tcpnotify_user
|
||||||
test_libbpf
|
test_libbpf
|
||||||
test_tcp_check_syncookie_user
|
test_tcp_check_syncookie_user
|
||||||
alu32
|
alu32
|
||||||
|
libbpf.pc
|
||||||
|
libbpf.so.*
|
||||||
|
|
|
@ -178,3 +178,198 @@
|
||||||
.result_unpriv = REJECT,
|
.result_unpriv = REJECT,
|
||||||
.result = ACCEPT,
|
.result = ACCEPT,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"jump test 6",
|
||||||
|
.insns = {
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_1, 2),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 2),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, -20),
|
||||||
|
},
|
||||||
|
.result = ACCEPT,
|
||||||
|
.retval = 2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"jump test 7",
|
||||||
|
.insns = {
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 3),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, -20),
|
||||||
|
},
|
||||||
|
.result = ACCEPT,
|
||||||
|
.retval = 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"jump test 8",
|
||||||
|
.insns = {
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_1, 2),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 3),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, -20),
|
||||||
|
},
|
||||||
|
.result = ACCEPT,
|
||||||
|
.retval = 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"jump/call test 9",
|
||||||
|
.insns = {
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||||
|
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 3),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
},
|
||||||
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||||
|
.result = REJECT,
|
||||||
|
.errstr = "jump out of range from insn 1 to 4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"jump/call test 10",
|
||||||
|
.insns = {
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||||
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 3),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
},
|
||||||
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||||
|
.result = REJECT,
|
||||||
|
.errstr = "last insn is not an exit or jmp",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"jump/call test 11",
|
||||||
|
.insns = {
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||||
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 3),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 3),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||||
|
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 26),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_MOV64_IMM(BPF_REG_0, 42),
|
||||||
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -31),
|
||||||
|
BPF_EXIT_INSN(),
|
||||||
|
},
|
||||||
|
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||||
|
.result = ACCEPT,
|
||||||
|
.retval = 3,
|
||||||
|
},
|
||||||
|
|
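For readers decoding the new test cases: the backward jumps are encoded with a negative branch offset, and the "call" variants use a negative immediate for a bpf-to-bpf call. A sketch of the underlying instruction layout (offset and pseudo-call semantics as implemented by the BPF_JMP_IMM/BPF_RAW_INSN macros in tools/include/linux/filter.h; treat it as illustrative):

	#include <linux/bpf.h>

	/* BPF_JMP_IMM(BPF_JA, 0, 0, -20): unconditional jump whose target is
	 * the instruction at (this insn + off + 1), i.e. 19 insns back.
	 */
	struct bpf_insn backward_ja = {
		.code = BPF_JMP | BPF_JA | BPF_K,
		.off  = -20,
	};

	/* BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20): src_reg == 1 marks a
	 * bpf-to-bpf call; the callee starts at (this insn + imm + 1),
	 * here 19 instructions back.
	 */
	struct bpf_insn backward_call = {
		.code    = BPF_JMP | BPF_CALL,
		.src_reg = 1,		/* BPF_PSEUDO_CALL */
		.imm     = -20,
	};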