mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) The bnx2x can hang if you give it a GSO packet with a segment size
    which is too big for the hardware, detect and drop in this case.
    From Daniel Axtens.

 2) Fix some overflows and pointer leaks in xtables, from Dmitry Vyukov.

 3) Missing RCU locking in igmp, from Eric Dumazet.

 4) Fix RX checksum handling on r8152, it can only checksum UDP and TCP
    packets. From Hayes Wang.

 5) Minor pacing tweak to TCP BBR congestion control, from Neal Cardwell.

 6) Missing RCU annotations in cls_u32, from Paolo Abeni.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (30 commits)
  Revert "defer call to mem_cgroup_sk_alloc()"
  soreuseport: fix mem leak in reuseport_add_sock()
  net: qlge: use memmove instead of skb_copy_to_linear_data
  net: qed: use correct strncpy() size
  net: cxgb4: avoid memcpy beyond end of source buffer
  cls_u32: add missing RCU annotation.
  r8152: set rx mode early when linking on
  r8152: fix wrong checksum status for received IPv4 packets
  nfp: fix TLV offset calculation
  net: pxa168_eth: add netconsole support
  net: igmp: add a missing rcu locking section
  ibmvnic: fix firmware version when no firmware level has been provided by the VIOS server
  vmxnet3: remove redundant initialization of pointer 'rq'
  lan78xx: remove redundant initialization of pointer 'phydev'
  net: jme: remove unused initialization of 'rxdesc'
  rtnetlink: remove check for IFLA_IF_NETNSID
  rocker: fix possible null pointer dereference in rocker_router_fib_event_work
  inet: Avoid unitialized variable warning in inet_unhash()
  net: bridge: Fix uninitialized error in br_fdb_sync_static()
  openvswitch: Remove padding from packet before L3+ conntrack processing
  ...
This commit is contained in:
commit c80c238a28
@@ -12934,6 +12934,24 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
                                              struct net_device *dev,
                                              netdev_features_t features)
{
    /*
     * A skb with gso_size + header length > 9700 will cause a
     * firmware panic. Drop GSO support.
     *
     * Eventually the upper layer should not pass these packets down.
     *
     * For speed, if the gso_size is <= 9000, assume there will
     * not be 700 bytes of headers and pass it through. Only do a
     * full (slow) validation if the gso_size is > 9000.
     *
     * (Due to the way SKB_BY_FRAGS works this will also do a full
     * validation in that case.)
     */
    if (unlikely(skb_is_gso(skb) &&
                 (skb_shinfo(skb)->gso_size > 9000) &&
                 !skb_gso_validate_mac_len(skb, 9700)))
        features &= ~NETIF_F_GSO_MASK;

    features = vlan_features_check(skb, features);
    return vxlan_features_check(skb, features);
}

@@ -355,7 +355,7 @@ struct cxgb4_lld_info {
};

struct cxgb4_uld_info {
    const char *name;
    char name[IFNAMSIZ];
    void *handle;
    unsigned int nrxq;
    unsigned int rxq_size;

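The cxgb4 hunk above replaces a `const char *name` with an embedded `char name[IFNAMSIZ]` so that a fixed-size copy of the name can never read past the end of a short string literal. Below is a minimal userspace sketch of that bug class, not the driver code; the struct, literal, and buffer sizes are invented for illustration.

    /* Why a fixed IFNAMSIZ-sized read through a `const char *` can run past the
     * end of the literal it points at, while an embedded array of that size
     * always provides IFNAMSIZ readable bytes. */
    #include <stdio.h>
    #include <string.h>

    #define IFNAMSIZ 16

    struct uld_info_old { const char *name; };     /* points at a short literal */
    struct uld_info_new { char name[IFNAMSIZ]; };  /* always IFNAMSIZ bytes long */

    int main(void)
    {
        struct uld_info_old old = { .name = "rdma" };  /* literal is only 5 bytes */
        struct uld_info_new new;
        char dst[IFNAMSIZ];

        /* memcpy(dst, old.name, IFNAMSIZ) would read 11 bytes beyond "rdma".
         * Bound the copy by the string instead. */
        strncpy(dst, old.name, IFNAMSIZ - 1);
        dst[IFNAMSIZ - 1] = '\0';

        /* With the embedded array, a full-size copy stays inside the source. */
        snprintf(new.name, sizeof(new.name), "%s", "rdma");
        memcpy(dst, new.name, IFNAMSIZ);

        printf("%s\n", dst);
        return 0;
    }
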
@@ -991,9 +991,8 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
{
    u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
    struct device *dev = &adapter->pdev->dev;
    struct be_queue_info *txq = &txo->q;
    bool map_single = false;
    u32 head = txq->head;
    u32 head;
    dma_addr_t busaddr;
    int len;

@@ -3305,7 +3305,11 @@ static void handle_vpd_rsp(union ibmvnic_crq *crq,
     */
    substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
    if (!substr) {
        dev_info(dev, "No FW level provided by VPD\n");
        dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
        ptr = strncpy((char *)adapter->fw_version, "N/A",
                      3 * sizeof(char));
        if (!ptr)
            dev_err(dev, "Failed to inform that firmware version is unavailable to the adapter\n");
        goto complete;
    }

@@ -1071,7 +1071,7 @@ static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
    struct jme_ring *rxring = &(jme->rxring[0]);
    struct rxdesc *rxdesc = rxring->desc;
    struct rxdesc *rxdesc;
    int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

    if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))

@@ -1362,6 +1362,15 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
    return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void pxa168_eth_netpoll(struct net_device *dev)
{
    disable_irq(dev->irq);
    pxa168_eth_int_handler(dev->irq, dev);
    enable_irq(dev->irq);
}
#endif

static void pxa168_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{

@@ -1390,6 +1399,9 @@ static const struct net_device_ops pxa168_eth_netdev_ops = {
    .ndo_do_ioctl = pxa168_eth_do_ioctl,
    .ndo_change_mtu = pxa168_eth_change_mtu,
    .ndo_tx_timeout = pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = pxa168_eth_netpoll,
#endif
};

static int pxa168_eth_probe(struct platform_device *pdev)

@@ -65,7 +65,7 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
    u32 hdr = readl(data);

    length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);
    offset = data - ctrl_mem + NFP_NET_CFG_TLV_BASE;
    offset = data - ctrl_mem;

    /* Advance past the header */
    data += 4;

@@ -3649,10 +3649,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
          BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
                                                              : 128;

    strncpy(type_name, big_ram->instance_name,
            strlen(big_ram->instance_name));
    strncpy(mem_name, big_ram->instance_name,
            strlen(big_ram->instance_name));
    strscpy(type_name, big_ram->instance_name, sizeof(type_name));
    strscpy(mem_name, big_ram->instance_name, sizeof(mem_name));

    /* Dump memory header */
    offset += qed_grc_dump_mem_hdr(p_hwfn,

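The qed hunk above drops strncpy() calls whose length argument was the length of the *source*, which neither limits the write to the destination nor guarantees NUL termination, in favour of a copy bounded by the destination. Here is a hedged userspace sketch of the difference; the helper mimics the kernel's strscpy() semantics and all names and sizes are invented.

    /* Why strncpy(dst, src, strlen(src)) is unsafe and what a destination-bounded,
     * always-terminated copy does instead. */
    #include <stdio.h>
    #include <string.h>

    /* Simplified model of strscpy(): copy at most dst_size - 1 bytes, always
     * NUL-terminate, return -1 on truncation. */
    static long strscpy_like(char *dst, const char *src, size_t dst_size)
    {
        size_t len;

        if (dst_size == 0)
            return -1;
        len = strnlen(src, dst_size);
        if (len == dst_size)
            len = dst_size - 1;            /* truncate, keep room for the NUL */
        memcpy(dst, src, len);
        dst[len] = '\0';
        return len == strlen(src) ? (long)len : -1;
    }

    int main(void)
    {
        char type_name[8];
        const char *instance_name = "BIG_RAM_BLOCK";   /* longer than the buffer */

        /* strncpy(type_name, instance_name, strlen(instance_name)) would write
         * 13 bytes into an 8-byte buffer and never add a terminating NUL. */
        strscpy_like(type_name, instance_name, sizeof(type_name));
        printf("%s\n", type_name);
        return 0;
    }
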
@@ -1747,8 +1747,7 @@ static void ql_realign_skb(struct sk_buff *skb, int len)
     */
    skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
    skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
    skb_copy_to_linear_data(skb, temp_addr,
                            (unsigned int)len);
    memmove(skb->data, temp_addr, len);
}

/*

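The qlge hunk above replaces skb_copy_to_linear_data(), a thin memcpy() wrapper, with memmove(), because the realignment copies data to a lower address inside the same buffer and the source and destination ranges overlap. A small userspace sketch of the principle, with a made-up payload:

    /* memcpy() has undefined behaviour on overlapping ranges; memmove() copies
     * as if through a temporary buffer and is safe. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[32] = "xxxxxxxxABCDEFGHIJ";   /* payload starts at offset 8 */
        char *payload = buf + 8;
        size_t len = strlen(payload);

        /* Realign: move the payload to the front of the same buffer.
         * [buf, buf+len) and [payload, payload+len) overlap, so
         * memcpy(buf, payload, len) would be undefined behaviour. */
        memmove(buf, payload, len + 1);        /* +1 copies the terminating NUL */

        printf("%s\n", buf);                   /* prints ABCDEFGHIJ */
        return 0;
    }
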
@@ -2902,6 +2902,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        goto err_alloc_ordered_workqueue;
    }

    err = rocker_probe_ports(rocker);
    if (err) {
        dev_err(&pdev->dev, "failed to probe ports\n");
        goto err_probe_ports;
    }

    /* Only FIBs pointing to our own netdevs are programmed into
     * the device, so no need to pass a callback.
     */

@@ -2918,22 +2924,16 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)

    rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

    err = rocker_probe_ports(rocker);
    if (err) {
        dev_err(&pdev->dev, "failed to probe ports\n");
        goto err_probe_ports;
    }

    dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
             (int)sizeof(rocker->hw.id), &rocker->hw.id);

    return 0;

err_probe_ports:
    unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
    unregister_fib_notifier(&rocker->fib_nb);
err_register_fib_notifier:
    rocker_remove_ports(rocker);
err_probe_ports:
    destroy_workqueue(rocker->rocker_owq);
err_alloc_ordered_workqueue:
    free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);

@@ -2961,9 +2961,9 @@ static void rocker_remove(struct pci_dev *pdev)
{
    struct rocker *rocker = pci_get_drvdata(pdev);

    rocker_remove_ports(rocker);
    unregister_switchdev_notifier(&rocker_switchdev_notifier);
    unregister_fib_notifier(&rocker->fib_nb);
    rocker_remove_ports(rocker);
    rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
    destroy_workqueue(rocker->rocker_owq);
    free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);

@@ -2006,7 +2006,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
{
    int ret;
    u32 mii_adv;
    struct phy_device *phydev = dev->net->phydev;
    struct phy_device *phydev;

    phydev = phy_find_first(dev->mdiobus);
    if (!phydev) {

@@ -1848,11 +1848,9 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
    if (opts2 & RD_IPV4_CS) {
        if (opts3 & IPF)
            checksum = CHECKSUM_NONE;
        else if ((opts2 & RD_UDP_CS) && (opts3 & UDPF))
            checksum = CHECKSUM_NONE;
        else if ((opts2 & RD_TCP_CS) && (opts3 & TCPF))
            checksum = CHECKSUM_NONE;
        else
        else if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
            checksum = CHECKSUM_UNNECESSARY;
        else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
            checksum = CHECKSUM_UNNECESSARY;
    } else if (opts2 & RD_IPV6_CS) {
        if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))

@@ -3797,11 +3795,12 @@ static void set_carrier(struct r8152 *tp)
    if (speed & LINK_STATUS) {
        if (!netif_carrier_ok(netdev)) {
            tp->rtl_ops.enable(tp);
            set_bit(RTL8152_SET_RX_MODE, &tp->flags);
            netif_stop_queue(netdev);
            napi_disable(napi);
            netif_carrier_on(netdev);
            rtl_start_rx(tp);
            clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
            _rtl8152_set_rx_mode(netdev);
            napi_enable(&tp->napi);
            netif_wake_queue(netdev);
            netif_info(tp, link, netdev, "carrier on\n");

@@ -4261,7 +4260,7 @@ static int rtl8152_post_reset(struct usb_interface *intf)
        mutex_lock(&tp->control);
        tp->rtl_ops.enable(tp);
        rtl_start_rx(tp);
        rtl8152_set_rx_mode(netdev);
        _rtl8152_set_rx_mode(netdev);
        mutex_unlock(&tp->control);
    }

@@ -2760,9 +2760,6 @@ static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
    size_t sz, i, ring0_size, ring1_size, comp_size;
    struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];


    if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
                                VMXNET3_MAX_ETH_HDR_SIZE) {
        adapter->skb_buf_size = adapter->netdev->mtu +

@@ -2794,7 +2791,8 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
    comp_size = ring0_size + ring1_size;

    for (i = 0; i < adapter->num_rx_queues; i++) {
        rq = &adapter->rx_queue[i];
        struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];

        rq->rx_ring[0].size = ring0_size;
        rq->rx_ring[1].size = ring1_size;
        rq->comp_ring.size = comp_size;

@@ -3287,6 +3287,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);

@@ -4120,6 +4121,21 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
    return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_mac_seglen is used to determine the real size of the
 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
 * headers (TCP/UDP).
 */
static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
    unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
    return hdr_len + skb_gso_transport_seglen(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.

@@ -5747,6 +5747,20 @@ void mem_cgroup_sk_alloc(struct sock *sk)
    if (!mem_cgroup_sockets_enabled)
        return;

    /*
     * Socket cloning can throw us here with sk_memcg already
     * filled. It won't however, necessarily happen from
     * process context. So the test for root memcg given
     * the current task's memcg won't help us in this case.
     *
     * Respecting the original socket's memcg is a better
     * decision in this case.
     */
    if (sk->sk_memcg) {
        css_get(&sk->sk_memcg->css);
        return;
    }

    rcu_read_lock();
    memcg = mem_cgroup_from_task(current);
    if (memcg == root_mem_cgroup)

@@ -993,7 +993,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
    struct net_bridge_fdb_entry *f, *tmp;
    int err;
    int err = 0;

    ASSERT_RTNL();

@@ -2802,9 +2802,6 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
    if (err < 0)
        return err;

    if (tb[IFLA_IF_NETNSID])
        return -EOPNOTSUPP;

    if (tb[IFLA_IFNAME])
        nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
    else

@@ -4913,6 +4913,45 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);

/**
 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
 *
 * There are a couple of instances where we have a GSO skb, and we
 * want to determine what size it would be after it is segmented.
 *
 * We might want to check:
 * -    L3+L4+payload size (e.g. IP forwarding)
 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
 *
 * This is a helper to do that correctly considering GSO_BY_FRAGS.
 *
 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
 *
 * @max_len: The maximum permissible length.
 *
 * Returns true if the segmented length <= max length.
 */
static inline bool skb_gso_size_check(const struct sk_buff *skb,
                                      unsigned int seg_len,
                                      unsigned int max_len) {
    const struct skb_shared_info *shinfo = skb_shinfo(skb);
    const struct sk_buff *iter;

    if (shinfo->gso_size != GSO_BY_FRAGS)
        return seg_len <= max_len;

    /* Undo this so we can re-use header sizes */
    seg_len -= GSO_BY_FRAGS;

    skb_walk_frags(skb, iter) {
        if (seg_len + skb_headlen(iter) > max_len)
            return false;
    }

    return true;
}

/**
 * skb_gso_validate_mtu - Return in case such skb fits a given MTU
 *

@@ -4924,27 +4963,25 @@ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
 */
bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
{
    const struct skb_shared_info *shinfo = skb_shinfo(skb);
    const struct sk_buff *iter;
    unsigned int hlen;

    hlen = skb_gso_network_seglen(skb);

    if (shinfo->gso_size != GSO_BY_FRAGS)
        return hlen <= mtu;

    /* Undo this so we can re-use header sizes */
    hlen -= GSO_BY_FRAGS;

    skb_walk_frags(skb, iter) {
        if (hlen + skb_headlen(iter) > mtu)
            return false;
    }

    return true;
    return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);

/**
 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
 *
 * @skb: GSO skb
 * @len: length to validate against
 *
 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
 * length once split, including L2, L3 and L4 headers and the payload.
 */
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
{
    return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);

static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
    if (skb_cow(skb, skb_headroom(skb)) < 0) {

@@ -1683,16 +1683,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
        newsk->sk_dst_pending_confirm = 0;
        newsk->sk_wmem_queued = 0;
        newsk->sk_forward_alloc = 0;

        /* sk->sk_memcg will be populated at accept() time */
        newsk->sk_memcg = NULL;

        atomic_set(&newsk->sk_drops, 0);
        newsk->sk_send_head = NULL;
        newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
        atomic_set(&newsk->sk_zckey, 0);

        sock_reset_flag(newsk, SOCK_DONE);
        mem_cgroup_sk_alloc(newsk);
        cgroup_sk_alloc(&newsk->sk_cgrp_data);

        rcu_read_lock();

@@ -94,6 +94,16 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
    return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
    struct sock_reuseport *reuse;

    reuse = container_of(head, struct sock_reuseport, rcu);
    if (reuse->prog)
        bpf_prog_destroy(reuse->prog);
    kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk: New socket to add to the group.

@@ -102,7 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
    struct sock_reuseport *reuse;
    struct sock_reuseport *old_reuse, *reuse;

    if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
        int err = reuseport_alloc(sk2);

@@ -113,10 +123,13 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)

    spin_lock_bh(&reuseport_lock);
    reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
                                      lockdep_is_held(&reuseport_lock)),
    WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
                                        lockdep_is_held(&reuseport_lock)),
              "socket already in reuseport group");
                                      lockdep_is_held(&reuseport_lock));
    old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
    if (old_reuse && old_reuse->num_socks != 1) {
        spin_unlock_bh(&reuseport_lock);
        return -EBUSY;
    }

    if (reuse->num_socks == reuse->max_socks) {
        reuse = reuseport_grow(reuse);

@@ -134,19 +147,11 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)

    spin_unlock_bh(&reuseport_lock);

    if (old_reuse)
        call_rcu(&old_reuse->rcu, reuseport_free_rcu);
    return 0;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
    struct sock_reuseport *reuse;

    reuse = container_of(head, struct sock_reuseport, rcu);
    if (reuse->prog)
        bpf_prog_destroy(reuse->prog);
    kfree(reuse);
}

void reuseport_detach_sock(struct sock *sk)
{
    struct sock_reuseport *reuse;

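The soreuseport fix above frees the old reuseport group through call_rcu() rather than leaking it. The general pattern, sketched below as kernel-style pseudocode with invented struct, field, and lock names, is to embed an rcu_head in the object, publish the replacement with rcu_assign_pointer(), and let the callback recover the enclosing object with container_of() after a grace period.

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);      /* protects updates to the slot */

    struct my_group {
        unsigned int num_socks;
        struct rcu_head rcu;              /* embedded so the callback can find us */
    };

    static void my_group_free_rcu(struct rcu_head *head)
    {
        struct my_group *grp = container_of(head, struct my_group, rcu);

        kfree(grp);                       /* safe: all pre-existing readers are done */
    }

    static void my_group_replace(struct my_group __rcu **slot, struct my_group *new)
    {
        struct my_group *old;

        spin_lock(&my_lock);
        old = rcu_dereference_protected(*slot, lockdep_is_held(&my_lock));
        rcu_assign_pointer(*slot, new);            /* publish the new group */
        spin_unlock(&my_lock);

        if (old)
            call_rcu(&old->rcu, my_group_free_rcu); /* defer the free past readers */
    }
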
@@ -386,7 +386,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
    pip->frag_off = htons(IP_DF);
    pip->ttl      = 1;
    pip->daddr    = fl4.daddr;

    rcu_read_lock();
    pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
    rcu_read_unlock();

    pip->protocol = IPPROTO_IGMP;
    pip->tot_len  = 0;  /* filled in later */
    ip_select_ident(net, skb, NULL);

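The igmp hunk above wraps the source-address lookup in an RCU read-side section, because the helper dereferences RCU-protected device state. The rule it enforces is sketched below with an invented, RCU-protected configuration pointer; this is illustrative pseudocode, not the igmp code.

    #include <linux/rcupdate.h>

    struct my_cfg {
        __be32 src_addr;
    };

    static struct my_cfg __rcu *global_cfg;   /* updated elsewhere, freed via RCU */

    static __be32 pick_src_addr(void)
    {
        struct my_cfg *cfg;
        __be32 addr = 0;

        rcu_read_lock();                      /* pin the RCU-protected object */
        cfg = rcu_dereference(global_cfg);    /* only valid inside the section */
        if (cfg)
            addr = cfg->src_addr;
        rcu_read_unlock();                    /* cfg must not be used after this */
        return addr;
    }
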
@@ -475,7 +475,6 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
        }
        spin_unlock_bh(&queue->fastopenq.lock);
    }
    mem_cgroup_sk_alloc(newsk);
out:
    release_sock(sk);
    if (req)

@@ -625,9 +625,8 @@ EXPORT_SYMBOL_GPL(inet_hash);
void inet_unhash(struct sock *sk)
{
    struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
    struct inet_listen_hashbucket *ilb;
    struct inet_listen_hashbucket *ilb = NULL;
    spinlock_t *lock;
    bool listener = false;

    if (sk_unhashed(sk))
        return;

@@ -635,7 +634,6 @@ void inet_unhash(struct sock *sk)
    if (sk->sk_state == TCP_LISTEN) {
        ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
        lock = &ilb->lock;
        listener = true;
    } else {
        lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
    }

@@ -645,7 +643,7 @@ void inet_unhash(struct sock *sk)

    if (rcu_access_pointer(sk->sk_reuseport_cb))
        reuseport_detach_sock(sk);
    if (listener) {
    if (ilb) {
        inet_unhash2(hashinfo, sk);
        __sk_del_node_init(sk);
        ilb->count--;

@@ -1255,11 +1255,8 @@ int ip_setsockopt(struct sock *sk, int level,
    if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
        optname != IP_IPSEC_POLICY &&
        optname != IP_XFRM_POLICY &&
        !ip_mroute_opt(optname)) {
        lock_sock(sk);
        !ip_mroute_opt(optname))
        err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
        release_sock(sk);
    }
#endif
    return err;
}

@@ -1284,12 +1281,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
    if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
        optname != IP_IPSEC_POLICY &&
        optname != IP_XFRM_POLICY &&
        !ip_mroute_opt(optname)) {
        lock_sock(sk);
        err = compat_nf_setsockopt(sk, PF_INET, optname,
                                   optval, optlen);
        release_sock(sk);
    }
        !ip_mroute_opt(optname))
        err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
                                   optlen);
#endif
    return err;
}

@@ -431,7 +431,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
    struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
    const struct ipt_entry *e = par->entryinfo;
    struct clusterip_config *config;
    int ret;
    int ret, i;

    if (par->nft_compat) {
        pr_err("cannot use CLUSTERIP target from nftables compat\n");

@@ -450,8 +450,18 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
        pr_info("Please specify destination IP\n");
        return -EINVAL;
    }

    /* FIXME: further sanity checks */
    if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) {
        pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes);
        return -EINVAL;
    }
    for (i = 0; i < cipinfo->num_local_nodes; i++) {
        if (cipinfo->local_nodes[i] - 1 >=
            sizeof(config->local_nodes) * 8) {
            pr_info("bad local_nodes[%d] %u\n",
                    i, cipinfo->local_nodes[i]);
            return -EINVAL;
        }
    }

    config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
    if (!config) {

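The CLUSTERIP hunk above is one of the xtables hardening fixes from the pull: counts and node ids supplied by userspace are checked against both the array that stores them and the bit range they will later index, before anything uses them. A hedged sketch of the same validation pattern, with invented struct and constant names:

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define MAX_NODES 16

    struct user_cfg {
        u32 num_nodes;
        u16 nodes[MAX_NODES];     /* 1-based node ids supplied by userspace */
    };

    /* Reject the configuration unless every id fits the array and the bitmap
     * (nbits bits) it will later be used against. */
    static int validate_user_cfg(const struct user_cfg *cfg, unsigned long nbits)
    {
        u32 i;

        if (cfg->num_nodes > ARRAY_SIZE(cfg->nodes))
            return -EINVAL;               /* would index past the array */

        for (i = 0; i < cfg->num_nodes; i++) {
            /* nodes[] is 1-based; node - 1 must be a valid bit number */
            if (cfg->nodes[i] == 0 || cfg->nodes[i] - 1 >= nbits)
                return -EINVAL;
        }
        return 0;
    }
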
@@ -213,15 +213,19 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
    struct nf_conntrack_tuple tuple;

    memset(&tuple, 0, sizeof(tuple));

    lock_sock(sk);
    tuple.src.u3.ip = inet->inet_rcv_saddr;
    tuple.src.u.tcp.port = inet->inet_sport;
    tuple.dst.u3.ip = inet->inet_daddr;
    tuple.dst.u.tcp.port = inet->inet_dport;
    tuple.src.l3num = PF_INET;
    tuple.dst.protonum = sk->sk_protocol;
    release_sock(sk);

    /* We only do TCP and SCTP at the moment: is there a better way? */
    if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
    if (tuple.dst.protonum != IPPROTO_TCP &&
        tuple.dst.protonum != IPPROTO_SCTP) {
        pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
        return -ENOPROTOOPT;
    }

@@ -481,7 +481,8 @@ static void bbr_advance_cycle_phase(struct sock *sk)

    bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
    bbr->cycle_mstamp = tp->delivered_mstamp;
    bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx];
    bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT :
                                        bbr_pacing_gain[bbr->cycle_idx];
}

/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */

@@ -490,8 +491,7 @@ static void bbr_update_cycle_phase(struct sock *sk,
{
    struct bbr *bbr = inet_csk_ca(sk);

    if ((bbr->mode == BBR_PROBE_BW) && !bbr->lt_use_bw &&
        bbr_is_next_cycle_phase(sk, rs))
    if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
        bbr_advance_cycle_phase(sk);
}

@@ -923,12 +923,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_NETFILTER
    /* we need to exclude all possible ENOPROTOOPTs except default case */
    if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
        optname != IPV6_XFRM_POLICY) {
        lock_sock(sk);
        err = nf_setsockopt(sk, PF_INET6, optname, optval,
                            optlen);
        release_sock(sk);
    }
        optname != IPV6_XFRM_POLICY)
        err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
#endif
    return err;
}

@@ -958,12 +954,9 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_NETFILTER
    /* we need to exclude all possible ENOPROTOOPTs except default case */
    if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
        optname != IPV6_XFRM_POLICY) {
        lock_sock(sk);
        err = compat_nf_setsockopt(sk, PF_INET6, optname,
                                   optval, optlen);
        release_sock(sk);
    }
        optname != IPV6_XFRM_POLICY)
        err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
                                   optlen);
#endif
    return err;
}

@@ -221,20 +221,27 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
static int
ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
{
    const struct inet_sock *inet = inet_sk(sk);
    struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
    const struct ipv6_pinfo *inet6 = inet6_sk(sk);
    const struct inet_sock *inet = inet_sk(sk);
    const struct nf_conntrack_tuple_hash *h;
    struct sockaddr_in6 sin6;
    struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
    struct nf_conn *ct;
    __be32 flow_label;
    int bound_dev_if;

    lock_sock(sk);
    tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
    tuple.src.u.tcp.port = inet->inet_sport;
    tuple.dst.u3.in6 = sk->sk_v6_daddr;
    tuple.dst.u.tcp.port = inet->inet_dport;
    tuple.dst.protonum = sk->sk_protocol;
    bound_dev_if = sk->sk_bound_dev_if;
    flow_label = inet6->flow_label;
    release_sock(sk);

    if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP)
    if (tuple.dst.protonum != IPPROTO_TCP &&
        tuple.dst.protonum != IPPROTO_SCTP)
        return -ENOPROTOOPT;

    if (*len < 0 || (unsigned int) *len < sizeof(sin6))

@@ -252,14 +259,13 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)

    sin6.sin6_family = AF_INET6;
    sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
    sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK;
    sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
    memcpy(&sin6.sin6_addr,
           &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
           sizeof(sin6.sin6_addr));

    nf_ct_put(ct);
    sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr,
                                             sk->sk_bound_dev_if);
    sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
    return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
}

@@ -168,7 +168,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
    struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
    struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
    u32 ip = 0, ip_to = 0, p = 0, port, port_to;
    u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
    u32 ip2_from = 0, ip2_to = 0, ip2;
    bool with_ports = false;
    u8 cidr;
    int ret;

@@ -269,22 +269,21 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
    }

    if (retried)
    if (retried) {
        ip = ntohl(h->next.ip);
        p = ntohs(h->next.port);
        ip2 = ntohl(h->next.ip2);
    } else {
        p = port;
        ip2 = ip2_from;
    }
    for (; ip <= ip_to; ip++) {
        e.ip = htonl(ip);
        p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                               : port;
        for (; p <= port_to; p++) {
            e.port = htons(p);
            ip2 = retried &&
                  ip == ntohl(h->next.ip) &&
                  p == ntohs(h->next.port)
                    ? ntohl(h->next.ip2) : ip2_from;
            while (ip2 <= ip2_to) {
            do {
                e.ip2 = htonl(ip2);
                ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
                                                &cidr);
                ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr);
                e.cidr = cidr - 1;
                ret = adtfn(set, &e, &ext, &ext, flags);

@@ -292,9 +291,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                return ret;

                ret = 0;
                ip2 = ip2_last + 1;
            }
            } while (ip2++ < ip2_to);
            ip2 = ip2_from;
        }
        p = port;
    }
    return ret;
}

@@ -143,7 +143,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
    ipset_adtfn adtfn = set->variant->adt[adt];
    struct hash_net4_elem e = { .cidr = HOST_MASK };
    struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
    u32 ip = 0, ip_to = 0, last;
    u32 ip = 0, ip_to = 0;
    int ret;

    if (tb[IPSET_ATTR_LINENO])

@@ -193,16 +193,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
    }
    if (retried)
        ip = ntohl(h->next.ip);
    while (ip <= ip_to) {
    do {
        e.ip = htonl(ip);
        last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
        ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
        ret = adtfn(set, &e, &ext, &ext, flags);
        if (ret && !ip_set_eexist(ret, flags))
            return ret;

        ret = 0;
        ip = last + 1;
    }
    } while (ip++ < ip_to);
    return ret;
}

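The ipset hunks above and below convert `while (ip <= ip_to)` loops over u32 address ranges into do/while loops that test the pre-increment value. The reason is unsigned wraparound: when a range ends at 0xffffffff, `ip <= ip_to` can never become false. A plain C sketch of the termination logic (the real loops also advance ip via ip_set_range_to_cidr(); the values here are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ip_to = UINT32_MAX;      /* a range ending at 255.255.255.255 */
        uint32_t ip = UINT32_MAX - 2;
        unsigned int iterations = 0;

        /* Broken form: `while (ip <= ip_to) { ...; ip = last + 1; }` never
         * terminates, because ip <= 0xffffffff is always true for a u32. */

        do {
            iterations++;                 /* process address `ip` here */
        } while (ip++ < ip_to);           /* compares the value before increment */

        printf("processed %u addresses\n", iterations);   /* prints 3 */
        return 0;
    }
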
@@ -200,7 +200,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
    ipset_adtfn adtfn = set->variant->adt[adt];
    struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
    struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
    u32 ip = 0, ip_to = 0, last;
    u32 ip = 0, ip_to = 0;
    int ret;

    if (tb[IPSET_ATTR_LINENO])

@@ -255,17 +255,16 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],

    if (retried)
        ip = ntohl(h->next.ip);
    while (ip <= ip_to) {
    do {
        e.ip = htonl(ip);
        last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
        ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
        ret = adtfn(set, &e, &ext, &ext, flags);

        if (ret && !ip_set_eexist(ret, flags))
            return ret;

        ret = 0;
        ip = last + 1;
    }
    } while (ip++ < ip_to);
    return ret;
}

@@ -169,8 +169,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
    ipset_adtfn adtfn = set->variant->adt[adt];
    struct hash_netnet4_elem e = { };
    struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
    u32 ip = 0, ip_to = 0, last;
    u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
    u32 ip = 0, ip_to = 0;
    u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
    int ret;

    if (tb[IPSET_ATTR_LINENO])

@@ -247,27 +247,27 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
    }

    if (retried)
    if (retried) {
        ip = ntohl(h->next.ip[0]);
        ip2 = ntohl(h->next.ip[1]);
    } else {
        ip2 = ip2_from;
    }

    while (ip <= ip_to) {
    do {
        e.ip[0] = htonl(ip);
        last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
        ip2 = (retried &&
               ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
                                           : ip2_from;
        while (ip2 <= ip2_to) {
        ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
        do {
            e.ip[1] = htonl(ip2);
            last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
            ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
            ret = adtfn(set, &e, &ext, &ext, flags);
            if (ret && !ip_set_eexist(ret, flags))
                return ret;

            ret = 0;
            ip2 = last2 + 1;
        }
        ip = last + 1;
    }
        } while (ip2++ < ip2_to);
        ip2 = ip2_from;
    } while (ip++ < ip_to);
    return ret;
}

@@ -161,7 +161,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
    ipset_adtfn adtfn = set->variant->adt[adt];
    struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
    struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
    u32 port, port_to, p = 0, ip = 0, ip_to = 0, last;
    u32 port, port_to, p = 0, ip = 0, ip_to = 0;
    bool with_ports = false;
    u8 cidr;
    int ret;

@@ -239,25 +239,26 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
    }

    if (retried)
    if (retried) {
        ip = ntohl(h->next.ip);
    while (ip <= ip_to) {
        p = ntohs(h->next.port);
    } else {
        p = port;
    }
    do {
        e.ip = htonl(ip);
        last = ip_set_range_to_cidr(ip, ip_to, &cidr);
        ip = ip_set_range_to_cidr(ip, ip_to, &cidr);
        e.cidr = cidr - 1;
        p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                               : port;
        for (; p <= port_to; p++) {
            e.port = htons(p);
            ret = adtfn(set, &e, &ext, &ext, flags);

            if (ret && !ip_set_eexist(ret, flags))
                return ret;

            ret = 0;
        }
        ip = last + 1;
    }
        p = port;
    } while (ip++ < ip_to);
    return ret;
}

@@ -184,8 +184,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
    ipset_adtfn adtfn = set->variant->adt[adt];
    struct hash_netportnet4_elem e = { };
    struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
    u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
    u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
    u32 ip = 0, ip_to = 0, p = 0, port, port_to;
    u32 ip2_from = 0, ip2_to = 0, ip2;
    bool with_ports = false;
    int ret;

@@ -288,33 +288,34 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
    }

    if (retried)
    if (retried) {
        ip = ntohl(h->next.ip[0]);
        p = ntohs(h->next.port);
        ip2 = ntohl(h->next.ip[1]);
    } else {
        p = port;
        ip2 = ip2_from;
    }

    while (ip <= ip_to) {
    do {
        e.ip[0] = htonl(ip);
        ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
        p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
                                                  : port;
        ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
        for (; p <= port_to; p++) {
            e.port = htons(p);
            ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
                   p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
                                             : ip2_from;
            while (ip2 <= ip2_to) {
            do {
                e.ip[1] = htonl(ip2);
                ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
                ip2 = ip_set_range_to_cidr(ip2, ip2_to,
                                           &e.cidr[1]);
                ret = adtfn(set, &e, &ext, &ext, flags);
                if (ret && !ip_set_eexist(ret, flags))
                    return ret;

                ret = 0;
                ip2 = ip2_last + 1;
            }
        }
        ip = ip_last + 1;
            } while (ip2++ < ip2_to);
            ip2 = ip2_from;
        }
        p = port;
    } while (ip++ < ip_to);
    return ret;
}

@@ -39,7 +39,6 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
#define XT_PCPU_BLOCK_SIZE 4096

struct compat_delta {

@@ -210,6 +209,9 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
    struct xt_match *match;

    if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
        return ERR_PTR(-EINVAL);

    match = xt_find_match(nfproto, name, revision);
    if (IS_ERR(match)) {
        request_module("%st_%s", xt_prefix[nfproto], name);

@@ -252,6 +254,9 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
    struct xt_target *target;

    if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
        return ERR_PTR(-EINVAL);

    target = xt_find_target(af, name, revision);
    if (IS_ERR(target)) {
        request_module("%st_%s", xt_prefix[af], name);

@@ -1000,7 +1005,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
        return NULL;

    /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
    if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
    if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
        return NULL;

    info = kvmalloc(sz, GFP_KERNEL);

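The strnlen() guards added to xt_request_find_match()/xt_request_find_target() above reject extension names that arrive from userspace without a NUL terminator inside their fixed-size field, before they reach printf-style consumers such as request_module(). The idea, as a small kernel-style sketch with an illustrative constant and helper name:

    #include <linux/string.h>
    #include <linux/errno.h>

    #define NAME_MAXLEN 29   /* stands in for XT_EXTENSION_MAXNAMELEN */

    /* A name copied in from userspace is only usable if a NUL occurs within
     * its NAME_MAXLEN-byte field; strnlen() == NAME_MAXLEN means it does not. */
    static int check_user_name(const char name[NAME_MAXLEN])
    {
        if (strnlen(name, NAME_MAXLEN) == NAME_MAXLEN)
            return -EINVAL;
        return 0;
    }
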
@@ -252,6 +252,7 @@ static struct xt_target idletimer_tg __read_mostly = {
    .family = NFPROTO_UNSPEC,
    .target = idletimer_tg_target,
    .targetsize = sizeof(struct idletimer_tg_info),
    .usersize = offsetof(struct idletimer_tg_info, timer),
    .checkentry = idletimer_tg_checkentry,
    .destroy = idletimer_tg_destroy,
    .me = THIS_MODULE,

@@ -198,6 +198,7 @@ static struct xt_target led_tg_reg __read_mostly = {
    .family = NFPROTO_UNSPEC,
    .target = led_tg,
    .targetsize = sizeof(struct xt_led_info),
    .usersize = offsetof(struct xt_led_info, internal_data),
    .checkentry = led_tg_check,
    .destroy = led_tg_destroy,
    .me = THIS_MODULE,

@@ -193,9 +193,8 @@ static struct xt_match limit_mt_reg __read_mostly = {
    .compatsize = sizeof(struct compat_xt_rateinfo),
    .compat_from_user = limit_mt_compat_from_user,
    .compat_to_user = limit_mt_compat_to_user,
#else
    .usersize = offsetof(struct xt_rateinfo, prev),
#endif
    .usersize = offsetof(struct xt_rateinfo, prev),
    .me = THIS_MODULE,
};

@@ -62,6 +62,7 @@ static struct xt_match nfacct_mt_reg __read_mostly = {
    .match = nfacct_mt,
    .destroy = nfacct_mt_destroy,
    .matchsize = sizeof(struct xt_nfacct_match_info),
    .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
    .me = THIS_MODULE,
};

@@ -84,6 +84,7 @@ static struct xt_match xt_statistic_mt_reg __read_mostly = {
    .checkentry = statistic_mt_check,
    .destroy = statistic_mt_destroy,
    .matchsize = sizeof(struct xt_statistic_info),
    .usersize = offsetof(struct xt_statistic_info, master),
    .me = THIS_MODULE,
};

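The .usersize additions above are the xtables pointer-leak fixes from the pull message: each match/target info struct has a user-visible head and a kernel-private tail (pointers, timers, per-CPU state), and .usersize tells the dump path how many bytes are safe to copy back to userspace. A hedged sketch of the effect, with an invented struct; this is not the x_tables implementation itself:

    #include <linux/types.h>
    #include <linux/stddef.h>
    #include <linux/string.h>

    struct demo_match_info {
        /* user-visible configuration, safe to copy out */
        __u32 rate;
        __u32 burst;
        /* kernel-private state; copying this out would leak a kernel pointer */
        void *priv;
    };

    /* Equivalent of setting .usersize = offsetof(struct demo_match_info, priv):
     * copy only the user-visible head, zero-fill the rest of the user view. */
    static void dump_match_info(void *ubuf, const struct demo_match_info *info)
    {
        size_t usersize = offsetof(struct demo_match_info, priv);

        memset(ubuf, 0, sizeof(*info));
        memcpy(ubuf, info, usersize);
    }
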
@@ -1098,6 +1098,36 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
    return 0;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int ovs_skb_network_trim(struct sk_buff *skb)
{
    unsigned int len;
    int err;

    switch (skb->protocol) {
    case htons(ETH_P_IP):
        len = ntohs(ip_hdr(skb)->tot_len);
        break;
    case htons(ETH_P_IPV6):
        len = sizeof(struct ipv6hdr)
              + ntohs(ipv6_hdr(skb)->payload_len);
        break;
    default:
        len = skb->len;
    }

    err = pskb_trim_rcsum(skb, len);
    if (err)
        kfree_skb(skb);

    return err;
}

/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */

@@ -1112,6 +1142,10 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
    nh_ofs = skb_network_offset(skb);
    skb_pull_rcsum(skb, nh_ofs);

    err = ovs_skb_network_trim(skb);
    if (err)
        return err;

    if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
        err = handle_fragments(net, key, info->zone.id, skb);
        if (err)

@@ -548,6 +548,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                                u32 flags, struct netlink_ext_ack *extack)
{
    struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
    struct tcf_block *block = tp->chain->block;
    struct tc_cls_u32_offload cls_u32 = {};
    bool skip_sw = tc_skip_sw(flags);

@@ -567,7 +568,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
        cls_u32.knode.sel = &n->sel;
        cls_u32.knode.exts = &n->exts;
        if (n->ht_down)
            cls_u32.knode.link_handle = n->ht_down->handle;
            cls_u32.knode.link_handle = ht->handle;

    err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
    if (err < 0) {

@@ -855,8 +856,9 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
                                         struct tc_u_knode *n)
{
    struct tc_u_knode *new;
    struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
    struct tc_u32_sel *s = &n->sel;
    struct tc_u_knode *new;

    new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
                  GFP_KERNEL);

@@ -874,11 +876,11 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
    new->fshift = n->fshift;
    new->res = n->res;
    new->flags = n->flags;
    RCU_INIT_POINTER(new->ht_down, n->ht_down);
    RCU_INIT_POINTER(new->ht_down, ht);

    /* bump reference count as long as we hold pointer to structure */
    if (new->ht_down)
        new->ht_down->refcnt++;
    if (ht)
        ht->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
    /* Statistics may be incremented by readers during update

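The cls_u32 hunks above add the missing RCU annotation mentioned in the pull message: the __rcu pointer ht_down is loaded once through rtnl_dereference() (legal because RTNL is held) into a plain local, and every later use goes through that local rather than re-reading the annotated field. A hedged sketch of the pattern with invented type names; it is not the cls_u32 code itself:

    #include <linux/rtnetlink.h>
    #include <linux/rcupdate.h>
    #include <linux/printk.h>

    struct demo_hnode {
        unsigned int handle;
        int refcnt;
    };

    struct demo_knode {
        struct demo_hnode __rcu *ht_down;   /* sparse warns on direct access */
    };

    /* Caller holds RTNL. */
    static void demo_link(struct demo_knode *n)
    {
        /* One annotated load while the protecting lock is held. */
        struct demo_hnode *ht = rtnl_dereference(n->ht_down);

        if (ht) {
            ht->refcnt++;                   /* reuse the local, no re-read */
            pr_debug("linked to handle %u\n", ht->handle);
        }
    }
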
@@ -142,16 +142,6 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r,
    return len;
}

/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
    unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
    return hdr_len + skb_gso_transport_seglen(skb);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */