mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Various netfilter fixlets from Pablo and the netfilter team.

 2) Fix regression in IPVS caused by lack of PMTU exceptions on local
    routes in ipv6, from Julian Anastasov.

 3) Check pskb_trim_rcsum for failure in DSA, from Zhouyang Jia.

 4) Don't crash on poll in TLS, from Daniel Borkmann.

 5) Revert SO_REUSE{ADDR,PORT} change, it regresses various things
    including Avahi mDNS. From Bart Van Assche.

 6) Missing of_node_put in qcom/emac driver, from Yue Haibing.

 7) We lack checking of the TCP checksum in one special case during SYN
    receive, from Frank van der Linden.

 8) Fix module init error paths of mac80211 hwsim, from Johannes Berg.

 9) Handle 802.1ad properly in stmmac driver, from Elad Nachman.

10) Must grab HW caps before doing quirk checks in stmmac driver, from
    Jose Abreu.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (81 commits)
  net: stmmac: Run HWIF Quirks after getting HW caps
  neighbour: skip NTF_EXT_LEARNED entries during forced gc
  net: cxgb3: add error handling for sysfs_create_group
  tls: fix waitall behavior in tls_sw_recvmsg
  tls: fix use-after-free in tls_push_record
  l2tp: filter out non-PPP sessions in pppol2tp_tunnel_ioctl()
  l2tp: reject creation of non-PPP sessions on L2TPv2 tunnels
  mlxsw: spectrum_switchdev: Fix port_vlan refcounting
  mlxsw: spectrum_router: Align with new route replace logic
  mlxsw: spectrum_router: Allow appending to dev-only routes
  ipv6: Only emit append events for appended routes
  stmmac: added support for 802.1ad vlan stripping
  cfg80211: fix rcu in cfg80211_unregister_wdev
  mac80211: Move up init of TXQs
  mac80211_hwsim: fix module init error paths
  cfg80211: initialize sinfo in cfg80211_get_station
  nl80211: fix some kernel doc tag mistakes
  hv_netvsc: Fix the variable sizes in ipsecv2 and rsc offload
  rds: avoid unenecessary cong_update in loop transport
  l2tp: clean up stale tunnel or session in pppol2tp_connect's error path
  ...
commit 9215310cf1
@@ -325,6 +325,8 @@ struct nicvf {
     struct tasklet_struct qs_err_task;
     struct work_struct reset_task;
     struct nicvf_work rx_mode_work;
+    /* spinlock to protect workqueue arguments from concurrent access */
+    spinlock_t rx_mode_wq_lock;
 
     /* PTP timestamp */
     struct cavium_ptp *ptp_clock;

@@ -1923,17 +1923,12 @@ static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
     }
 }
 
-static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
+                                     struct nicvf *nic)
 {
-    struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
-                                              work.work);
-    struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
     union nic_mbx mbx = {};
     int idx;
 
-    if (!vf_work)
-        return;
-
     /* From the inside of VM code flow we have only 128 bits memory
      * available to send message to host's PF, so send all mc addrs
      * one by one, starting from flush command in case if kernel
@@ -1944,7 +1939,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
     mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
     nicvf_send_msg_to_pf(nic, &mbx);
 
-    if (vf_work->mode & BGX_XCAST_MCAST_FILTER) {
+    if (mode & BGX_XCAST_MCAST_FILTER) {
         /* once enabling filtering, we need to signal to PF to add
          * its' own LMAC to the filter to accept packets for it.
          */
@@ -1954,23 +1949,46 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
     }
 
     /* check if we have any specific MACs to be added to PF DMAC filter */
-    if (vf_work->mc) {
+    if (mc_addrs) {
         /* now go through kernel list of MACs and add them one by one */
-        for (idx = 0; idx < vf_work->mc->count; idx++) {
+        for (idx = 0; idx < mc_addrs->count; idx++) {
             mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-            mbx.xcast.data.mac = vf_work->mc->mc[idx];
+            mbx.xcast.data.mac = mc_addrs->mc[idx];
             nicvf_send_msg_to_pf(nic, &mbx);
         }
-        kfree(vf_work->mc);
+        kfree(mc_addrs);
     }
 
     /* and finally set rx mode for PF accordingly */
     mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
-    mbx.xcast.data.mode = vf_work->mode;
+    mbx.xcast.data.mode = mode;
 
     nicvf_send_msg_to_pf(nic, &mbx);
 }
 
+static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+{
+    struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
+                                              work.work);
+    struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
+    u8 mode;
+    struct xcast_addr_list *mc;
+
+    if (!vf_work)
+        return;
+
+    /* Save message data locally to prevent them from
+     * being overwritten by next ndo_set_rx_mode call().
+     */
+    spin_lock(&nic->rx_mode_wq_lock);
+    mode = vf_work->mode;
+    mc = vf_work->mc;
+    vf_work->mc = NULL;
+    spin_unlock(&nic->rx_mode_wq_lock);
+
+    __nicvf_set_rx_mode_task(mode, mc, nic);
+}
+
 static void nicvf_set_rx_mode(struct net_device *netdev)
 {
     struct nicvf *nic = netdev_priv(netdev);
@@ -2004,9 +2022,12 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
             }
         }
     }
+    spin_lock(&nic->rx_mode_wq_lock);
+    kfree(nic->rx_mode_work.mc);
     nic->rx_mode_work.mc = mc_list;
     nic->rx_mode_work.mode = mode;
-    queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ);
+    queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+    spin_unlock(&nic->rx_mode_wq_lock);
 }
 
 static const struct net_device_ops nicvf_netdev_ops = {
@@ -2163,6 +2184,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     INIT_WORK(&nic->reset_task, nicvf_reset_task);
 
     INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+    spin_lock_init(&nic->rx_mode_wq_lock);
 
     err = register_netdev(netdev);
     if (err) {

@@ -3362,10 +3362,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
     err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                              &cxgb3_attr_group);
+    if (err) {
+        dev_err(&pdev->dev, "cannot create sysfs group\n");
+        goto out_close_led;
+    }
 
     print_port_info(adapter, ai);
     return 0;
 
+out_close_led:
+    t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
+
 out_free_dev:
     iounmap(adapter->regs);
     for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)

@@ -760,9 +760,9 @@ struct ixgbe_adapter {
 #define IXGBE_RSS_KEY_SIZE 40  /* size of RSS Hash Key in bytes */
     u32 *rss_key;
 
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
     struct ixgbe_ipsec *ipsec;
-#endif /* CONFIG_XFRM */
+#endif /* CONFIG_XFRM_OFFLOAD */
 };
 
 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)

@@ -158,7 +158,16 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
     reg |= IXGBE_SECRXCTRL_RX_DIS;
     IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
 
     IXGBE_WRITE_FLUSH(hw);
+    /* If both Tx and Rx are ready there are no packets
+     * that we need to flush so the loopback configuration
+     * below is not necessary.
+     */
+    t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
+            IXGBE_SECTXSTAT_SECTX_RDY;
+    r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
+            IXGBE_SECRXSTAT_SECRX_RDY;
+    if (t_rdy && r_rdy)
+        return;
 
     /* If the tx fifo doesn't have link, but still has data,
      * we can't clear the tx sec block. Set the MAC loopback
@@ -185,7 +194,7 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
                 IXGBE_SECTXSTAT_SECTX_RDY;
         r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
                 IXGBE_SECRXSTAT_SECRX_RDY;
-    } while (!t_rdy && !r_rdy && limit--);
+    } while (!(t_rdy && r_rdy) && limit--);
 
     /* undo loopback if we played with it earlier */
     if (!link) {
@@ -966,10 +975,22 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
  **/
 void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
 {
+    struct ixgbe_hw *hw = &adapter->hw;
     struct ixgbe_ipsec *ipsec;
+    u32 t_dis, r_dis;
     size_t size;
 
-    if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+    if (hw->mac.type == ixgbe_mac_82598EB)
         return;
 
+    /* If there is no support for either Tx or Rx offload
+     * we should not be advertising support for IPsec.
+     */
+    t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
+            IXGBE_SECTXSTAT_SECTX_OFF_DIS;
+    r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
+            IXGBE_SECRXSTAT_SECRX_OFF_DIS;
+    if (t_dis || r_dis)
+        return;
+
     ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
@@ -1001,13 +1022,6 @@ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
 
     adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
 
-#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
-                            NETIF_F_HW_ESP_TX_CSUM | \
-                            NETIF_F_GSO_ESP)
-
-    adapter->netdev->features |= IXGBE_ESP_FEATURES;
-    adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;
-
     return;
 
 err2:

@@ -593,6 +593,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 }
 
 #endif
+    /* To support macvlan offload we have to use num_tc to
+     * restrict the queues that can be used by the device.
+     * By doing this we can avoid reporting a false number of
+     * queues.
+     */
+    if (vmdq_i > 1)
+        netdev_set_num_tc(adapter->netdev, 1);
+
     /* populate TC0 for use by pool 0 */
     netdev_set_tc_queue(adapter->netdev, 0,
                         adapter->num_rx_queues_per_pool, 0);

@@ -6117,6 +6117,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 #ifdef CONFIG_IXGBE_DCB
     ixgbe_init_dcb(adapter);
 #endif
+    ixgbe_init_ipsec_offload(adapter);
 
     /* default flow control settings */
     hw->fc.requested_mode = ixgbe_fc_full;
@@ -8822,14 +8823,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
     } else {
         netdev_reset_tc(dev);
 
-        /* To support macvlan offload we have to use num_tc to
-         * restrict the queues that can be used by the device.
-         * By doing this we can avoid reporting a false number of
-         * queues.
-         */
-        if (!tc && adapter->num_rx_pools > 1)
-            netdev_set_num_tc(dev, 1);
-
         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
             adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
 
@@ -9904,7 +9897,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
      * the TSO, so it's the exception.
      */
     if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
         if (!skb->sp)
 #endif
             features &= ~NETIF_F_TSO;
@@ -10437,6 +10430,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     if (hw->mac.type >= ixgbe_mac_82599EB)
         netdev->features |= NETIF_F_SCTP_CRC;
 
+#ifdef CONFIG_XFRM_OFFLOAD
+#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
+                            NETIF_F_HW_ESP_TX_CSUM | \
+                            NETIF_F_GSO_ESP)
+
+    if (adapter->ipsec)
+        netdev->features |= IXGBE_ESP_FEATURES;
+#endif
     /* copy netdev features into list of user selectable features */
     netdev->hw_features |= netdev->features |
                            NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -10499,8 +10500,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                           NETIF_F_FCOE_MTU;
     }
 #endif /* IXGBE_FCOE */
-    ixgbe_init_ipsec_offload(adapter);
-
     if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
         netdev->hw_features |= NETIF_F_LRO;
     if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)

@@ -599,13 +599,15 @@ struct ixgbe_nvm_version {
 #define IXGBE_SECTXCTRL_STORE_FORWARD   0x00000004
 
 #define IXGBE_SECTXSTAT_SECTX_RDY       0x00000001
-#define IXGBE_SECTXSTAT_ECC_TXERR       0x00000002
+#define IXGBE_SECTXSTAT_SECTX_OFF_DIS   0x00000002
+#define IXGBE_SECTXSTAT_ECC_TXERR       0x00000004
 
 #define IXGBE_SECRXCTRL_SECRX_DIS       0x00000001
 #define IXGBE_SECRXCTRL_RX_DIS          0x00000002
 
 #define IXGBE_SECRXSTAT_SECRX_RDY       0x00000001
-#define IXGBE_SECRXSTAT_ECC_RXERR       0x00000002
+#define IXGBE_SECRXSTAT_SECRX_OFF_DIS   0x00000002
+#define IXGBE_SECRXSTAT_ECC_RXERR       0x00000004
 
 /* LinkSec (MacSec) Registers */
 #define IXGBE_LSECTXCAP                 0x08A00

@@ -4756,12 +4756,6 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
     kfree(mlxsw_sp_rt6);
 }
 
-static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
-{
-    /* RTF_CACHE routes are ignored */
-    return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
-}
-
 static struct fib6_info *
 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 {
@@ -4771,11 +4765,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 
 static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
-                                 const struct fib6_info *nrt, bool replace)
+                                 const struct fib6_info *nrt, bool append)
 {
     struct mlxsw_sp_fib6_entry *fib6_entry;
 
-    if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
+    if (!append)
         return NULL;
 
     list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4790,8 +4784,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
             break;
         if (rt->fib6_metric < nrt->fib6_metric)
             continue;
-        if (rt->fib6_metric == nrt->fib6_metric &&
-            mlxsw_sp_fib6_rt_can_mp(rt))
+        if (rt->fib6_metric == nrt->fib6_metric)
             return fib6_entry;
         if (rt->fib6_metric > nrt->fib6_metric)
             break;
@@ -5170,7 +5163,7 @@ static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                               const struct fib6_info *nrt, bool replace)
 {
-    struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
+    struct mlxsw_sp_fib6_entry *fib6_entry;
 
     list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
         struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5179,18 +5172,13 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
             continue;
         if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
             break;
-        if (replace && rt->fib6_metric == nrt->fib6_metric) {
-            if (mlxsw_sp_fib6_rt_can_mp(rt) ==
-                mlxsw_sp_fib6_rt_can_mp(nrt))
-                return fib6_entry;
-            if (mlxsw_sp_fib6_rt_can_mp(nrt))
-                fallback = fallback ?: fib6_entry;
-        }
+        if (replace && rt->fib6_metric == nrt->fib6_metric)
+            return fib6_entry;
         if (rt->fib6_metric > nrt->fib6_metric)
-            return fallback ?: fib6_entry;
+            return fib6_entry;
     }
 
-    return fallback;
+    return NULL;
 }
 
 static int
@@ -5316,7 +5304,8 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
-                                    struct fib6_info *rt, bool replace)
+                                    struct fib6_info *rt, bool replace,
+                                    bool append)
 {
     struct mlxsw_sp_fib6_entry *fib6_entry;
     struct mlxsw_sp_fib_node *fib_node;
@@ -5342,7 +5331,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
     /* Before creating a new entry, try to append route to an existing
      * multipath entry.
      */
-    fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
+    fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
     if (fib6_entry) {
         err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
         if (err)
@@ -5350,6 +5339,14 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
         return 0;
     }
 
+    /* We received an append event, yet did not find any route to
+     * append to.
+     */
+    if (WARN_ON(append)) {
+        err = -EINVAL;
+        goto err_fib6_entry_append;
+    }
+
     fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
     if (IS_ERR(fib6_entry)) {
         err = PTR_ERR(fib6_entry);
@@ -5367,6 +5364,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 err_fib6_node_entry_link:
     mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
 err_fib6_entry_create:
+err_fib6_entry_append:
 err_fib6_entry_nexthop_add:
     mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
     return err;
@@ -5717,7 +5715,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
     struct mlxsw_sp_fib_event_work *fib_work =
         container_of(work, struct mlxsw_sp_fib_event_work, work);
     struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
-    bool replace;
+    bool replace, append;
     int err;
 
     rtnl_lock();
@@ -5728,8 +5726,10 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
     case FIB_EVENT_ENTRY_APPEND: /* fall through */
     case FIB_EVENT_ENTRY_ADD:
         replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+        append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
         err = mlxsw_sp_router_fib6_add(mlxsw_sp,
-                                       fib_work->fen6_info.rt, replace);
+                                       fib_work->fen6_info.rt, replace,
+                                       append);
         if (err)
             mlxsw_sp_router_fib_abort(mlxsw_sp);
         mlxsw_sp_rt6_release(fib_work->fen6_info.rt);

@@ -1018,8 +1018,10 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
     int err;
 
     /* No need to continue if only VLAN flags were changed */
-    if (mlxsw_sp_port_vlan->bridge_port)
+    if (mlxsw_sp_port_vlan->bridge_port) {
+        mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
         return 0;
+    }
 
     err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
     if (err)

@@ -455,6 +455,7 @@ static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
 
     eth_hw_addr_random(nn->dp.netdev);
     netif_keep_dst(nn->dp.netdev);
+    nn->vnic_no_name = true;
 
     return 0;
 

@@ -381,6 +381,8 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
     err = PTR_ERR_OR_ZERO(rt);
     if (err)
         return NOTIFY_DONE;
+
+    ip_rt_put(rt);
 #else
     return NOTIFY_DONE;
 #endif

@@ -590,6 +590,8 @@ struct nfp_net_dp {
  * @vnic_list:      Entry on device vNIC list
  * @pdev:           Backpointer to PCI device
  * @app:            APP handle if available
+ * @vnic_no_name:   For non-port PF vNIC make ndo_get_phys_port_name return
+ *                  -EOPNOTSUPP to keep backwards compatibility (set by app)
  * @port:           Pointer to nfp_port structure if vNIC is a port
  * @app_priv:       APP private data for this vNIC
  */
@@ -663,6 +665,8 @@ struct nfp_net {
     struct pci_dev *pdev;
     struct nfp_app *app;
 
+    bool vnic_no_name;
+
     struct nfp_port *port;
 
     void *app_priv;

@@ -3121,7 +3121,7 @@ static void nfp_net_stat64(struct net_device *netdev,
     struct nfp_net *nn = netdev_priv(netdev);
     int r;
 
-    for (r = 0; r < nn->dp.num_r_vecs; r++) {
+    for (r = 0; r < nn->max_r_vecs; r++) {
         struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
         u64 data[3];
         unsigned int start;
@@ -3286,7 +3286,7 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
     if (nn->port)
         return nfp_port_get_phys_port_name(netdev, name, len);
 
-    if (nn->dp.is_vf)
+    if (nn->dp.is_vf || nn->vnic_no_name)
         return -EOPNOTSUPP;
 
     n = snprintf(name, len, "n%d", nn->id);

@@ -98,21 +98,18 @@ struct nfp_resource {
 
 static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
 {
-    char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};
     struct nfp_resource_entry entry;
     u32 cpp_id, key;
     int ret, i;
 
     cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0);  /* Atomic read */
 
-    strncpy(name_pad, res->name, sizeof(name_pad));
-
     /* Search for a matching entry */
-    if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
+    if (!strcmp(res->name, NFP_RESOURCE_TBL_NAME)) {
         nfp_err(cpp, "Grabbing device lock not supported\n");
         return -EOPNOTSUPP;
     }
-    key = crc32_posix(name_pad, sizeof(name_pad));
+    key = crc32_posix(res->name, NFP_RESOURCE_ENTRY_NAME_SZ);
 
     for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
         u64 addr = NFP_RESOURCE_TBL_BASE +

@@ -384,6 +384,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
     }
 
     sgmii_pdev = of_find_device_by_node(np);
+    of_node_put(np);
     if (!sgmii_pdev) {
         dev_err(&pdev->dev, "invalid internal-phy property\n");
         return -ENODEV;

@@ -334,9 +334,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
 
     dwmac->data = (const struct meson8b_dwmac_data *)
         of_device_get_match_data(&pdev->dev);
-    if (!dwmac->data)
-        return -EINVAL;
-
+    if (!dwmac->data) {
+        ret = -EINVAL;
+        goto err_remove_config_dt;
+    }
     res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
     dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
     if (IS_ERR(dwmac->regs)) {

@@ -252,13 +252,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
         return ret;
     }
 
-    /* Run quirks, if needed */
-    if (entry->quirks) {
-        ret = entry->quirks(priv);
-        if (ret)
-            return ret;
-    }
-
+    /* Save quirks, if needed for posterior use */
+    priv->hwif_quirks = entry->quirks;
     return 0;
 }
 

@@ -129,6 +129,7 @@ struct stmmac_priv {
     struct net_device *dev;
     struct device *device;
     struct mac_device_info *hw;
+    int (*hwif_quirks)(struct stmmac_priv *priv);
     struct mutex lock;
 
     /* RX Queue */

@@ -3182,17 +3182,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 {
-    struct ethhdr *ehdr;
+    struct vlan_ethhdr *veth;
+    __be16 vlan_proto;
     u16 vlanid;
 
-    if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
-        NETIF_F_HW_VLAN_CTAG_RX &&
-        !__vlan_get_tag(skb, &vlanid)) {
+    veth = (struct vlan_ethhdr *)skb->data;
+    vlan_proto = veth->h_vlan_proto;
+
+    if ((vlan_proto == htons(ETH_P_8021Q) &&
+         dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
+        (vlan_proto == htons(ETH_P_8021AD) &&
+         dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
         /* pop the vlan tag */
-        ehdr = (struct ethhdr *)skb->data;
-        memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
+        vlanid = ntohs(veth->h_vlan_TCI);
+        memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
         skb_pull(skb, VLAN_HLEN);
-        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+        __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
     }
 }
 
@@ -4130,6 +4135,13 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
     if (priv->dma_cap.tsoen)
         dev_info(priv->device, "TSO supported\n");
 
+    /* Run HW quirks, if any */
+    if (priv->hwif_quirks) {
+        ret = priv->hwif_quirks(priv);
+        if (ret)
+            return ret;
+    }
+
     return 0;
 }
 
@@ -4235,7 +4247,7 @@ int stmmac_dvr_probe(struct device *device,
     ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
     /* Both mac100 and gmac support receive VLAN tag detection */
-    ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+    ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
 #endif
     priv->msg_enable = netif_msg_init(debug, default_msg_level);
 

@@ -123,7 +123,6 @@
  * @phy_node:   pointer to the PHY device node
  * @mii_bus:    pointer to the MII bus
  * @last_link:  last link status
- * @has_mdio:   indicates whether MDIO is included in the HW
  */
 struct net_local {
 
@@ -144,7 +143,6 @@ struct net_local {
     struct mii_bus *mii_bus;
 
     int last_link;
-    bool has_mdio;
 };
 
 
@@ -863,14 +861,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
     bus->write = xemaclite_mdio_write;
     bus->parent = dev;
 
-    lp->mii_bus = bus;
-
     rc = of_mdiobus_register(bus, np);
     if (rc) {
         dev_err(dev, "Failed to register mdio bus.\n");
         goto err_register;
     }
 
+    lp->mii_bus = bus;
+
     return 0;
 
 err_register:
@@ -1145,9 +1143,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
     xemaclite_update_address(lp, ndev->dev_addr);
 
     lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
-    rc = xemaclite_mdio_setup(lp, &ofdev->dev);
-    if (rc)
-        dev_warn(&ofdev->dev, "error registering MDIO bus\n");
+    xemaclite_mdio_setup(lp, &ofdev->dev);
 
     dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
 
@@ -1191,7 +1187,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
     struct net_local *lp = netdev_priv(ndev);
 
     /* Un-register the mii_bus, if configured */
-    if (lp->has_mdio) {
+    if (lp->mii_bus) {
         mdiobus_unregister(lp->mii_bus);
         mdiobus_free(lp->mii_bus);
         lp->mii_bus = NULL;

@@ -2,6 +2,5 @@ config HYPERV_NET
     tristate "Microsoft Hyper-V virtual network driver"
     depends on HYPERV
     select UCS2_STRING
-    select FAILOVER
     help
       Select this option to enable the Hyper-V virtual network driver.

@@ -901,6 +901,8 @@ struct net_device_context {
     struct hv_device *device_ctx;
     /* netvsc_device */
     struct netvsc_device __rcu *nvdev;
+    /* list of netvsc net_devices */
+    struct list_head list;
     /* reconfigure work */
     struct delayed_work dwork;
     /* last reconfig time */
@@ -931,8 +933,6 @@ struct net_device_context {
     u32 vf_alloc;
     /* Serial number of the VF to team with */
     u32 vf_serial;
-
-    struct failover *failover;
 };
 
 /* Per channel data */
@@ -1277,17 +1277,17 @@ struct ndis_lsov2_offload {
 
 struct ndis_ipsecv2_offload {
     u32 encap;
-    u16 ip6;
-    u16 ip4opt;
-    u16 ip6ext;
-    u16 ah;
-    u16 esp;
-    u16 ah_esp;
-    u16 xport;
-    u16 tun;
-    u16 xport_tun;
-    u16 lso;
-    u16 extseq;
+    u8  ip6;
+    u8  ip4opt;
+    u8  ip6ext;
+    u8  ah;
+    u8  esp;
+    u8  ah_esp;
+    u8  xport;
+    u8  tun;
+    u8  xport_tun;
+    u8  lso;
+    u8  extseq;
     u32 udp_esp;
     u32 auth;
     u32 crypto;
@@ -1295,8 +1295,8 @@ struct ndis_ipsecv2_offload {
 };
 
 struct ndis_rsc_offload {
-    u16 ip4;
-    u16 ip6;
+    u8  ip4;
+    u8  ip6;
 };
 
 struct ndis_encap_offload {

@@ -42,7 +42,6 @@
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
-#include <net/failover.h>
 
 #include "hyperv_net.h"
 
@@ -68,6 +67,8 @@ static int debug = -1;
 module_param(debug, int, 0444);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
+static LIST_HEAD(netvsc_dev_list);
+
 static void netvsc_change_rx_flags(struct net_device *net, int change)
 {
     struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -1780,6 +1781,36 @@ static void netvsc_link_change(struct work_struct *w)
     rtnl_unlock();
 }
 
+static struct net_device *get_netvsc_bymac(const u8 *mac)
+{
+    struct net_device_context *ndev_ctx;
+
+    list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+        struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
+
+        if (ether_addr_equal(mac, dev->perm_addr))
+            return dev;
+    }
+
+    return NULL;
+}
+
+static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
+{
+    struct net_device_context *net_device_ctx;
+    struct net_device *dev;
+
+    dev = netdev_master_upper_dev_get(vf_netdev);
+    if (!dev || dev->netdev_ops != &device_ops)
+        return NULL; /* not a netvsc device */
+
+    net_device_ctx = netdev_priv(dev);
+    if (!rtnl_dereference(net_device_ctx->nvdev))
+        return NULL; /* device is removed */
+
+    return dev;
+}
+
 /* Called when VF is injecting data into network stack.
  * Change the associated network device from VF to netvsc.
  * note: already called with rcu_read_lock
@@ -1802,6 +1833,46 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
     return RX_HANDLER_ANOTHER;
 }
 
+static int netvsc_vf_join(struct net_device *vf_netdev,
+                          struct net_device *ndev)
+{
+    struct net_device_context *ndev_ctx = netdev_priv(ndev);
+    int ret;
+
+    ret = netdev_rx_handler_register(vf_netdev,
+                                     netvsc_vf_handle_frame, ndev);
+    if (ret != 0) {
+        netdev_err(vf_netdev,
+                   "can not register netvsc VF receive handler (err = %d)\n",
+                   ret);
+        goto rx_handler_failed;
+    }
+
+    ret = netdev_master_upper_dev_link(vf_netdev, ndev,
+                                       NULL, NULL, NULL);
+    if (ret != 0) {
+        netdev_err(vf_netdev,
+                   "can not set master device %s (err = %d)\n",
+                   ndev->name, ret);
+        goto upper_link_failed;
+    }
+
+    /* set slave flag before open to prevent IPv6 addrconf */
+    vf_netdev->flags |= IFF_SLAVE;
+
+    schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+
+    call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
+
+    netdev_info(vf_netdev, "joined to %s\n", ndev->name);
+    return 0;
+
+upper_link_failed:
+    netdev_rx_handler_unregister(vf_netdev);
+rx_handler_failed:
+    return ret;
+}
+
 static void __netvsc_vf_setup(struct net_device *ndev,
                               struct net_device *vf_netdev)
 {
@@ -1852,95 +1923,104 @@ static void netvsc_vf_setup(struct work_struct *w)
     rtnl_unlock();
 }
 
-static int netvsc_pre_register_vf(struct net_device *vf_netdev,
-                                  struct net_device *ndev)
+static int netvsc_register_vf(struct net_device *vf_netdev)
 {
+    struct net_device *ndev;
     struct net_device_context *net_device_ctx;
     struct netvsc_device *netvsc_dev;
+    int ret;
 
+    if (vf_netdev->addr_len != ETH_ALEN)
+        return NOTIFY_DONE;
+
+    /*
+     * We will use the MAC address to locate the synthetic interface to
+     * associate with the VF interface. If we don't find a matching
+     * synthetic interface, move on.
+     */
+    ndev = get_netvsc_bymac(vf_netdev->perm_addr);
+    if (!ndev)
+        return NOTIFY_DONE;
+
     net_device_ctx = netdev_priv(ndev);
     netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
     if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
-        return -ENODEV;
+        return NOTIFY_DONE;
 
-    return 0;
-}
+    /* if syntihetic interface is a different namespace,
+     * then move the VF to that namespace; join will be
+     * done again in that context.
+     */
+    if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
+        ret = dev_change_net_namespace(vf_netdev,
+                                       dev_net(ndev), "eth%d");
+        if (ret)
+            netdev_err(vf_netdev,
+                       "could not move to same namespace as %s: %d\n",
+                       ndev->name, ret);
+        else
+            netdev_info(vf_netdev,
+                        "VF moved to namespace with: %s\n",
+                        ndev->name);
+        return NOTIFY_DONE;
+    }
 
-static int netvsc_register_vf(struct net_device *vf_netdev,
-                              struct net_device *ndev)
-{
-    struct net_device_context *ndev_ctx = netdev_priv(ndev);
+    netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
 
-    /* set slave flag before open to prevent IPv6 addrconf */
-    vf_netdev->flags |= IFF_SLAVE;
-
-    schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
-
-    call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
-
-    netdev_info(vf_netdev, "joined to %s\n", ndev->name);
+    if (netvsc_vf_join(vf_netdev, ndev) != 0)
+        return NOTIFY_DONE;
 
     dev_hold(vf_netdev);
-    rcu_assign_pointer(ndev_ctx->vf_netdev, vf_netdev);
-
-    return 0;
+    rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
+    return NOTIFY_OK;
 }
 
 /* VF up/down change detected, schedule to change data path */
-static int netvsc_vf_changed(struct net_device *vf_netdev,
-                             struct net_device *ndev)
+static int netvsc_vf_changed(struct net_device *vf_netdev)
 {
     struct net_device_context *net_device_ctx;
     struct netvsc_device *netvsc_dev;
+    struct net_device *ndev;
     bool vf_is_up = netif_running(vf_netdev);
 
+    ndev = get_netvsc_byref(vf_netdev);
+    if (!ndev)
+        return NOTIFY_DONE;
+
     net_device_ctx = netdev_priv(ndev);
     netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
     if (!netvsc_dev)
-        return -ENODEV;
+        return NOTIFY_DONE;
 
     netvsc_switch_datapath(ndev, vf_is_up);
     netdev_info(ndev, "Data path switched %s VF: %s\n",
                 vf_is_up ? "to" : "from", vf_netdev->name);
 
-    return 0;
+    return NOTIFY_OK;
 }
 
-static int netvsc_pre_unregister_vf(struct net_device *vf_netdev,
-                                    struct net_device *ndev)
+static int netvsc_unregister_vf(struct net_device *vf_netdev)
 {
+    struct net_device *ndev;
     struct net_device_context *net_device_ctx;
 
+    ndev = get_netvsc_byref(vf_netdev);
+    if (!ndev)
+        return NOTIFY_DONE;
+
     net_device_ctx = netdev_priv(ndev);
     cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
 
-    return 0;
-}
-
-static int netvsc_unregister_vf(struct net_device *vf_netdev,
-                                struct net_device *ndev)
-{
-    struct net_device_context *net_device_ctx;
-
-    net_device_ctx = netdev_priv(ndev);
-
     netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
 
+    netdev_rx_handler_unregister(vf_netdev);
+    netdev_upper_dev_unlink(vf_netdev, ndev);
     RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
     dev_put(vf_netdev);
 
-    return 0;
+    return NOTIFY_OK;
 }
 
-static struct failover_ops netvsc_failover_ops = {
-    .slave_pre_register   = netvsc_pre_register_vf,
-    .slave_register       = netvsc_register_vf,
-    .slave_pre_unregister = netvsc_pre_unregister_vf,
-    .slave_unregister     = netvsc_unregister_vf,
-    .slave_link_change    = netvsc_vf_changed,
-    .slave_handle_frame   = netvsc_vf_handle_frame,
-};
-
 static int netvsc_probe(struct hv_device *dev,
                         const struct hv_vmbus_device_id *dev_id)
 {
@@ -2024,23 +2104,19 @@ static int netvsc_probe(struct hv_device *dev,
     else
         net->max_mtu = ETH_DATA_LEN;
 
-    ret = register_netdev(net);
+    rtnl_lock();
+    ret = register_netdevice(net);
     if (ret != 0) {
         pr_err("Unable to register netdev.\n");
         goto register_failed;
     }
 
-    net_device_ctx->failover = failover_register(net, &netvsc_failover_ops);
-    if (IS_ERR(net_device_ctx->failover)) {
-        ret = PTR_ERR(net_device_ctx->failover);
-        goto err_failover;
-    }
+    list_add(&net_device_ctx->list, &netvsc_dev_list);
+    rtnl_unlock();
+    return 0;
 
-    return ret;
-
-err_failover:
-    unregister_netdev(net);
 register_failed:
+    rtnl_unlock();
     rndis_filter_device_remove(dev, nvdev);
 rndis_failed:
     free_percpu(net_device_ctx->vf_stats);
@@ -2080,14 +2156,13 @@ static int netvsc_remove(struct hv_device *dev)
     rtnl_lock();
     vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
     if (vf_netdev)
-        failover_slave_unregister(vf_netdev);
+        netvsc_unregister_vf(vf_netdev);
 
     if (nvdev)
         rndis_filter_device_remove(dev, nvdev);
 
     unregister_netdevice(net);
 
-    failover_unregister(ndev_ctx->failover);
+    list_del(&ndev_ctx->list);
 
     rtnl_unlock();
     rcu_read_unlock();
@@ -2115,8 +2190,54 @@ static struct hv_driver netvsc_drv = {
     .remove = netvsc_remove,
 };
 
+/*
+ * On Hyper-V, every VF interface is matched with a corresponding
+ * synthetic interface. The synthetic interface is presented first
+ * to the guest. When the corresponding VF instance is registered,
+ * we will take care of switching the data path.
+ */
+static int netvsc_netdev_event(struct notifier_block *this,
+                               unsigned long event, void *ptr)
+{
+    struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+
+    /* Skip our own events */
+    if (event_dev->netdev_ops == &device_ops)
+        return NOTIFY_DONE;
+
+    /* Avoid non-Ethernet type devices */
+    if (event_dev->type != ARPHRD_ETHER)
+        return NOTIFY_DONE;
+
+    /* Avoid Vlan dev with same MAC registering as VF */
+    if (is_vlan_dev(event_dev))
+        return NOTIFY_DONE;
+
+    /* Avoid Bonding master dev with same MAC registering as VF */
+    if ((event_dev->priv_flags & IFF_BONDING) &&
+        (event_dev->flags & IFF_MASTER))
+        return NOTIFY_DONE;
+
+    switch (event) {
+    case NETDEV_REGISTER:
+        return netvsc_register_vf(event_dev);
+    case NETDEV_UNREGISTER:
+        return netvsc_unregister_vf(event_dev);
+    case NETDEV_UP:
+    case NETDEV_DOWN:
+        return netvsc_vf_changed(event_dev);
+    default:
+        return NOTIFY_DONE;
+    }
+}
+
+static struct notifier_block netvsc_netdev_notifier = {
+    .notifier_call = netvsc_netdev_event,
+};
+
 static void __exit netvsc_drv_exit(void)
 {
+    unregister_netdevice_notifier(&netvsc_netdev_notifier);
     vmbus_driver_unregister(&netvsc_drv);
 }
 
@@ -2135,6 +2256,7 @@ static int __init netvsc_drv_init(void)
     if (ret)
         return ret;
 
+    register_netdevice_notifier(&netvsc_netdev_notifier);
     return 0;
 }
 

@@ -26,10 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/mdio-bitbang.h>
 #include <linux/mdio-gpio.h>
-#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 
-#include <linux/of_gpio.h>
-#include <linux/of_mdio.h>
 
 struct mdio_gpio_info {

@@ -3572,11 +3572,14 @@ static int __init init_mac80211_hwsim(void)
     hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
     if (!hwsim_wq)
         return -ENOMEM;
-    rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
+
+    err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
+    if (err)
+        goto out_free_wq;
 
     err = register_pernet_device(&hwsim_net_ops);
     if (err)
-        return err;
+        goto out_free_rht;
 
     err = platform_driver_register(&mac80211_hwsim_driver);
     if (err)
@@ -3701,6 +3704,10 @@ static int __init init_mac80211_hwsim(void)
     platform_driver_unregister(&mac80211_hwsim_driver);
 out_unregister_pernet:
     unregister_pernet_device(&hwsim_net_ops);
+out_free_rht:
+    rhashtable_destroy(&hwsim_radios_rht);
+out_free_wq:
+    destroy_workqueue(hwsim_wq);
     return err;
 }
 module_init(init_mac80211_hwsim);

@@ -239,7 +239,7 @@ static void rx_refill_timeout(struct timer_list *t)
 static int netfront_tx_slot_available(struct netfront_queue *queue)
 {
     return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
-        (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
+        (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
 }
 
 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -790,7 +790,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
     RING_IDX cons = queue->rx.rsp_cons;
     struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
     grant_ref_t ref = xennet_get_rx_ref(queue, cons);
-    int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+    int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
     int slots = 1;
     int err = 0;
     unsigned long ret;

@@ -345,7 +345,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 
     rcu_read_lock();
     nat_hook = rcu_dereference(nf_nat_hook);
-    if (nat_hook->decode_session)
+    if (nat_hook && nat_hook->decode_session)
         nat_hook->decode_session(skb, fl);
     rcu_read_unlock();
 #endif

@@ -23,6 +23,9 @@
 /* Set is defined with timeout support: timeout value may be 0 */
 #define IPSET_NO_TIMEOUT    UINT_MAX
 
+/* Max timeout value, see msecs_to_jiffies() in jiffies.h */
+#define IPSET_MAX_TIMEOUT   (UINT_MAX >> 1)/MSEC_PER_SEC
+
 #define ip_set_adt_opt_timeout(opt, set) \
     ((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
 
@@ -32,11 +35,10 @@ ip_set_timeout_uget(struct nlattr *tb)
     unsigned int timeout = ip_set_get_h32(tb);
 
     /* Normalize to fit into jiffies */
-    if (timeout > UINT_MAX/MSEC_PER_SEC)
-        timeout = UINT_MAX/MSEC_PER_SEC;
+    if (timeout > IPSET_MAX_TIMEOUT)
+        timeout = IPSET_MAX_TIMEOUT;
 
-    /* Userspace supplied TIMEOUT parameter: adjust crazy size */
-    return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
+    return timeout;
 }
 
 static inline bool
@@ -65,8 +67,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
 static inline u32
 ip_set_timeout_get(const unsigned long *timeout)
 {
-    return *timeout == IPSET_ELEM_PERMANENT ? 0 :
-        jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
+    u32 t;
+
+    if (*timeout == IPSET_ELEM_PERMANENT)
+        return 0;
+
+    t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
+    /* Zero value in userspace means no timeout */
+    return t == 0 ? 1 : t;
 }
 
 #endif /* __KERNEL__ */

@@ -631,6 +631,7 @@ struct ip_vs_service {
 
     /* alternate persistence engine */
     struct ip_vs_pe __rcu *pe;
+    int conntrack_afmask;
 
     struct rcu_head rcu_head;
 };
@@ -1611,6 +1612,35 @@ static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
     return false;
 }
 
+static inline int ip_vs_register_conntrack(struct ip_vs_service *svc)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+    int afmask = (svc->af == AF_INET6) ? 2 : 1;
+    int ret = 0;
+
+    if (!(svc->conntrack_afmask & afmask)) {
+        ret = nf_ct_netns_get(svc->ipvs->net, svc->af);
+        if (ret >= 0)
+            svc->conntrack_afmask |= afmask;
+    }
+    return ret;
+#else
+    return 0;
+#endif
+}
+
+static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+    int afmask = (svc->af == AF_INET6) ? 2 : 1;
+
+    if (svc->conntrack_afmask & afmask) {
+        nf_ct_netns_put(svc->ipvs->net, svc->af);
+        svc->conntrack_afmask &= ~afmask;
+    }
+#endif
+}
+
 static inline int
 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
 {

@@ -20,7 +20,8 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
                                  bool *addit);
 
 bool nf_conncount_add(struct hlist_head *head,
-                      const struct nf_conntrack_tuple *tuple);
+                      const struct nf_conntrack_tuple *tuple,
+                      const struct nf_conntrack_zone *zone);
 
 void nf_conncount_cache_free(struct hlist_head *hhead);
 

@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NFT_DUP_H_
-#define _NFT_DUP_H_
-
-struct nft_dup_inet {
-    enum nft_registers sreg_addr:8;
-    enum nft_registers sreg_dev:8;
-};
-
-#endif /* _NFT_DUP_H_ */

@@ -1133,6 +1133,11 @@ struct sctp_input_cb {
 };
 #define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
 
+struct sctp_output_cb {
+    struct sk_buff *last;
+};
+#define SCTP_OUTPUT_CB(__skb) ((struct sctp_output_cb *)&((__skb)->cb[0]))
+
 static inline const struct sk_buff *sctp_gso_headskb(const struct sk_buff *skb)
 {
     const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;

@@ -109,8 +109,7 @@ struct tls_sw_context_rx {
 
     struct strparser strp;
     void (*saved_data_ready)(struct sock *sk);
-    unsigned int (*sk_poll)(struct file *file, struct socket *sock,
-                            struct poll_table_struct *wait);
+    __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
     struct sk_buff *recv_pkt;
     u8 control;
     bool decrypted;
@@ -225,8 +224,7 @@ void tls_sw_free_resources_tx(struct sock *sk);
 void tls_sw_free_resources_rx(struct sock *sk);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                    int nonblock, int flags, int *addr_len);
-unsigned int tls_sw_poll(struct file *file, struct socket *sock,
-                         struct poll_table_struct *wait);
+__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
                            struct pipe_inode_info *pipe,
                            size_t len, unsigned int flags);

@@ -112,7 +112,7 @@ enum ip_conntrack_status {
               IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
               IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
 
-    __IPS_MAX_BIT = 14,
+    __IPS_MAX_BIT = 15,
 };
 
 /* Connection tracking event types */

@@ -266,7 +266,7 @@ enum nft_rule_compat_attributes {
  * @NFT_SET_INTERVAL: set contains intervals
  * @NFT_SET_MAP: set is used as a dictionary
  * @NFT_SET_TIMEOUT: set uses timeouts
- * @NFT_SET_EVAL: set contains expressions for evaluation
+ * @NFT_SET_EVAL: set can be updated from the evaluation path
  * @NFT_SET_OBJECT: set contains stateful objects
  */
 enum nft_set_flags {

@@ -981,18 +981,18 @@
  *    only the %NL80211_ATTR_IE data is used and updated with this command.
  *
  * @NL80211_CMD_SET_PMK: For offloaded 4-Way handshake, set the PMK or PMK-R0
- *    for the given authenticator address (specified with &NL80211_ATTR_MAC).
- *    When &NL80211_ATTR_PMKR0_NAME is set, &NL80211_ATTR_PMK specifies the
+ *    for the given authenticator address (specified with %NL80211_ATTR_MAC).
+ *    When %NL80211_ATTR_PMKR0_NAME is set, %NL80211_ATTR_PMK specifies the
  *    PMK-R0, otherwise it specifies the PMK.
  * @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
  *    configured PMK for the authenticator address identified by
- *    &NL80211_ATTR_MAC.
+ *    %NL80211_ATTR_MAC.
  * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way
  *    handshake was completed successfully by the driver. The BSSID is
- *    specified with &NL80211_ATTR_MAC. Drivers that support 4 way handshake
+ *    specified with %NL80211_ATTR_MAC. Drivers that support 4 way handshake
  *    offload should send this event after indicating 802.11 association with
- *    &NL80211_CMD_CONNECT or &NL80211_CMD_ROAM. If the 4 way handshake failed
- *    &NL80211_CMD_DISCONNECT should be indicated instead.
+ *    %NL80211_CMD_CONNECT or %NL80211_CMD_ROAM. If the 4 way handshake failed
+ *    %NL80211_CMD_DISCONNECT should be indicated instead.
  *
  * @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
  *    and RX notification. This command is used both as a request to transmit
@@ -1029,9 +1029,9 @@
  *    initiated the connection through the connect request.
  *
  * @NL80211_CMD_STA_OPMODE_CHANGED: An event that notify station's
- *    ht opmode or vht opmode changes using any of &NL80211_ATTR_SMPS_MODE,
- *    &NL80211_ATTR_CHANNEL_WIDTH,&NL80211_ATTR_NSS attributes with its
- *    address(specified in &NL80211_ATTR_MAC).
+ *    ht opmode or vht opmode changes using any of %NL80211_ATTR_SMPS_MODE,
+ *    %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its
+ *    address(specified in %NL80211_ATTR_MAC).
  *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
@@ -2218,7 +2218,7 @@ enum nl80211_commands {
  * @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external
  *    authentication operation (u32 attribute with an
  *    &enum nl80211_external_auth_action value). This is used with the
- *    &NL80211_CMD_EXTERNAL_AUTH request event.
+ *    %NL80211_CMD_EXTERNAL_AUTH request event.
  * @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user
  *    space supports external authentication. This attribute shall be used
  *    only with %NL80211_CMD_CONNECT request. The driver may offload
@@ -3491,7 +3491,7 @@ enum nl80211_sched_scan_match_attr {
 * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
 *    base on contiguous rules and wider channels will be allowed to cross
 *    multiple contiguous/overlapping frequency ranges.
-* @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT
+* @NL80211_RRF_IR_CONCURRENT: See %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
 * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
 * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
 * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
@@ -5643,11 +5643,11 @@ enum nl80211_nan_func_attributes {
  * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF set.
  *    This is a flag.
  * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if
- *    &NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary.
+ *    %NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary.
  * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if
- *    &NL80211_NAN_SRF_BF is present. This is a u8.
+ *    %NL80211_NAN_SRF_BF is present. This is a u8.
  * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if
- *    and only if &NL80211_NAN_SRF_BF isn't present. This is a nested
+ *    and only if %NL80211_NAN_SRF_BF isn't present. This is a nested
  *    attribute. Each nested attribute is a MAC address.
  * @NUM_NL80211_NAN_SRF_ATTR: internal
  * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute

@@ -295,6 +295,15 @@ static const struct file_operations bpffs_map_fops = {
     .release = bpffs_map_release,
 };
 
+static int bpffs_obj_open(struct inode *inode, struct file *file)
+{
+    return -EIO;
+}
+
+static const struct file_operations bpffs_obj_fops = {
+    .open = bpffs_obj_open,
+};
+
 static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
                          const struct inode_operations *iops,
                          const struct file_operations *fops)
@@ -314,7 +323,8 @@ static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
 
 static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
 {
-    return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops, NULL);
+    return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
+                         &bpffs_obj_fops);
 }
 
 static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
@@ -322,7 +332,7 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
     struct bpf_map *map = arg;
 
     return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
-                         map->btf ? &bpffs_map_fops : NULL);
+                         map->btf ? &bpffs_map_fops : &bpffs_obj_fops);
 }
 
 static struct dentry *

@@ -411,6 +411,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
     watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
     if (IS_ERR(watcher))
         return PTR_ERR(watcher);
+
+    if (watcher->family != NFPROTO_BRIDGE) {
+        module_put(watcher->me);
+        return -ENOENT;
+    }
+
     w->u.watcher = watcher;
 
     par->target = watcher;
@@ -709,6 +715,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
     }
     i = 0;
 
+    memset(&mtpar, 0, sizeof(mtpar));
+    memset(&tgpar, 0, sizeof(tgpar));
     mtpar.net = tgpar.net = net;
     mtpar.table = tgpar.table = name;
     mtpar.entryinfo = tgpar.entryinfo = e;
@@ -730,6 +738,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
         goto cleanup_watchers;
     }
 
+    /* Reject UNSPEC, xtables verdicts/return values are incompatible */
+    if (target->family != NFPROTO_BRIDGE) {
+        module_put(target->me);
+        ret = -ENOENT;
+        goto cleanup_watchers;
+    }
+
     t->u.target = target;
     if (t->u.target == &ebt_standard_target) {
         if (gap < sizeof(struct ebt_standard_target)) {
@@ -1606,16 +1621,16 @@ struct compat_ebt_entry_mwt {
         compat_uptr_t ptr;
     } u;
     compat_uint_t match_size;
-    compat_uint_t data[0];
+    compat_uint_t data[0] __attribute__ ((aligned (__alignof__(struct compat_ebt_replace))));
 };
 
 /* account for possible padding between match_size and ->data */
 static int ebt_compat_entry_padsize(void)
 {
-    BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
-                 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
-    return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
-                 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
+    BUILD_BUG_ON(sizeof(struct ebt_entry_match) <
+                 sizeof(struct compat_ebt_entry_mwt));
+    return (int) sizeof(struct ebt_entry_match) -
+                 sizeof(struct compat_ebt_entry_mwt);
 }
 
 static int ebt_compat_match_offset(const struct xt_match *match,

@@ -261,7 +261,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
     if (!reject6_br_csum_ok(oldskb, hook))
         return;
 
-    nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
+    nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
                      LL_MAX_HEADER + len, GFP_ATOMIC);
     if (!nskb)
         return;

@@ -119,13 +119,14 @@ unsigned long neigh_rand_reach_time(unsigned long base)
 EXPORT_SYMBOL(neigh_rand_reach_time);
 
 
-static bool neigh_del(struct neighbour *n, __u8 state,
+static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags,
 		      struct neighbour __rcu **np, struct neigh_table *tbl)
 {
 	bool retval = false;
 
 	write_lock(&n->lock);
-	if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state)) {
+	if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) &&
+	    !(n->flags & flags)) {
 		struct neighbour *neigh;
 
 		neigh = rcu_dereference_protected(n->next,
@@ -157,7 +158,7 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
 	while ((n = rcu_dereference_protected(*np,
 					      lockdep_is_held(&tbl->lock)))) {
 		if (n == ndel)
-			return neigh_del(n, 0, np, tbl);
+			return neigh_del(n, 0, 0, np, tbl);
 		np = &n->next;
 	}
 	return false;
@@ -185,7 +186,8 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 		 *   - nobody refers to it.
 		 *   - it is not permanent
 		 */
-		if (neigh_del(n, NUD_PERMANENT, np, tbl)) {
+		if (neigh_del(n, NUD_PERMANENT, NTF_EXT_LEARNED, np,
+			      tbl)) {
 			shrunk = 1;
 			continue;
 		}
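For reference, a standalone sketch of the eviction predicate neigh_del() now applies during forced gc (flag values assumed from include/uapi/linux/neighbour.h; not kernel code): entries that are still referenced, PERMANENT, or externally learned stay put.

#define NUD_PERMANENT	0x80	/* assumed uapi value */
#define NTF_EXT_LEARNED	0x10	/* assumed uapi value */

static int gc_may_evict(int refcnt, unsigned char nud_state,
			unsigned char ntf_flags)
{
	return refcnt == 1 &&			/* nobody else holds it */
	       !(nud_state & NUD_PERMANENT) &&	/* not permanent */
	       !(ntf_flags & NTF_EXT_LEARNED);	/* not externally learned */
}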
@@ -728,22 +728,9 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		sock_valbool_flag(sk, SOCK_DBG, valbool);
 		break;
 	case SO_REUSEADDR:
-		val = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
-		if ((sk->sk_family == PF_INET || sk->sk_family == PF_INET6) &&
-		    inet_sk(sk)->inet_num &&
-		    (sk->sk_reuse != val)) {
-			ret = (sk->sk_state == TCP_ESTABLISHED) ? -EISCONN : -EUCLEAN;
-			break;
-		}
-		sk->sk_reuse = val;
+		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
 		break;
 	case SO_REUSEPORT:
-		if ((sk->sk_family == PF_INET || sk->sk_family == PF_INET6) &&
-		    inet_sk(sk)->inet_num &&
-		    (sk->sk_reuseport != valbool)) {
-			ret = (sk->sk_state == TCP_ESTABLISHED) ? -EISCONN : -EUCLEAN;
-			break;
-		}
 		sk->sk_reuseport = valbool;
 		break;
 	case SO_TYPE:
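A userspace sketch of the pattern this revert un-breaks (Avahi-style mDNS setup was the reported victim): setting SO_REUSEADDR on an already-bound socket succeeds again instead of failing with EUCLEAN/EISCONN.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in sa;

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_port = htons(5353);	/* mDNS port, for illustration */

	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("bind");
	/* set-after-bind: rejected by the reverted change, legal again now */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0)
		perror("setsockopt");
	return 0;
}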
@@ -75,7 +75,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (!skb->dev)
 		return NULL;
 
-	pskb_trim_rcsum(skb, skb->len - 4);
+	if (pskb_trim_rcsum(skb, skb->len - 4))
+		return NULL;
 
 	return skb;
 }
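A standalone model of why the check matters (simplified; the real pskb_trim_rcsum() can fail when a shared skb needs reallocation): a trim that can fail must make the caller drop the frame rather than keep using the buffer.

#include <stddef.h>

struct buf { unsigned char *data; size_t len; };

static int trim_tail(struct buf *b, size_t tag_len)
{
	if (b->len < tag_len)
		return -1;	/* models pskb_trim_rcsum() failing */
	b->len -= tag_len;
	return 0;
}

static struct buf *rcv_frame(struct buf *b)
{
	if (trim_tail(b, 4))	/* 4-byte trailer tag, as in tag_trailer.c */
		return NULL;	/* drop instead of using a bad buffer */
	return b;
}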
@@ -531,6 +531,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
 		return -ENOMEM;
 
 	j = 0;
+	memset(&mtpar, 0, sizeof(mtpar));
 	mtpar.net	= net;
 	mtpar.table	= name;
 	mtpar.entryinfo = &e->ip;
@@ -1730,6 +1730,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
 			reqsk_put(req);
 			goto discard_it;
 		}
+		if (tcp_checksum_complete(skb)) {
+			reqsk_put(req);
+			goto csum_error;
+		}
 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
 			inet_csk_reqsk_queue_drop_and_put(sk, req);
 			goto lookup;
@@ -268,8 +268,6 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		goto out_check_final;
 	}
 
-	p = *head;
-	th2 = tcp_hdr(p);
 	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
 
 out_check_final:
@@ -1324,6 +1324,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp,
 		}
 	}
 
+	memset(&cfg, 0, sizeof(cfg));
 	cfg.valid_lft = min_t(__u32, ifp->valid_lft,
 			      idev->cnf.temp_valid_lft + age);
 	cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
@@ -1357,7 +1358,6 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp,
 
 	cfg.pfx = &addr;
 	cfg.scope = ipv6_addr_scope(cfg.pfx);
-	cfg.rt_priority = 0;
 
 	ift = ipv6_add_addr(idev, &cfg, block, NULL);
 	if (IS_ERR(ift)) {
@@ -934,6 +934,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 {
 	struct fib6_info *leaf = rcu_dereference_protected(fn->leaf,
 				    lockdep_is_held(&rt->fib6_table->tb6_lock));
+	enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
 	struct fib6_info *iter = NULL, *match = NULL;
 	struct fib6_info __rcu **ins;
 	int replace = (info->nlh &&
@@ -1013,6 +1014,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 					      "Can not append to a REJECT route");
 				return -EINVAL;
 			}
+			event = FIB_EVENT_ENTRY_APPEND;
 			rt->fib6_nsiblings = match->fib6_nsiblings;
 			list_add_tail(&rt->fib6_siblings, &match->fib6_siblings);
 			match->fib6_nsiblings++;
@@ -1034,15 +1036,12 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 	 *	insert node
 	 */
 	if (!replace) {
-		enum fib_event_type event;
-
 		if (!add)
 			pr_warn("NLM_F_CREATE should be set when creating new route\n");
 
 add:
 		nlflags |= NLM_F_CREATE;
 
-		event = append ? FIB_EVENT_ENTRY_APPEND : FIB_EVENT_ENTRY_ADD;
 		err = call_fib6_entry_notifiers(info->nl_net, event, rt,
 						extack);
 		if (err)
@@ -550,6 +550,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
 		return -ENOMEM;
 
 	j = 0;
+	memset(&mtpar, 0, sizeof(mtpar));
 	mtpar.net	= net;
 	mtpar.table	= name;
 	mtpar.entryinfo = &e->ipv6;
@@ -2307,9 +2307,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 	const struct in6_addr *daddr, *saddr;
 	struct rt6_info *rt6 = (struct rt6_info *)dst;
 
-	if (rt6->rt6i_flags & RTF_LOCAL)
-		return;
-
 	if (dst_metric_locked(dst, RTAX_MTU))
 		return;
 
@@ -1479,6 +1479,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 			reqsk_put(req);
 			goto discard_it;
 		}
+		if (tcp_checksum_complete(skb)) {
+			reqsk_put(req);
+			goto csum_error;
+		}
 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
 			inet_csk_reqsk_queue_drop_and_put(sk, req);
 			goto lookup;
@@ -553,6 +553,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 		goto out_tunnel;
 	}
 
+	/* L2TPv2 only accepts PPP pseudo-wires */
+	if (tunnel->version == 2 && cfg.pw_type != L2TP_PWTYPE_PPP) {
+		ret = -EPROTONOSUPPORT;
+		goto out_tunnel;
+	}
+
 	if (tunnel->version > 2) {
 		if (info->attrs[L2TP_ATTR_DATA_SEQ])
 			cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
@@ -612,6 +612,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 	u32 session_id, peer_session_id;
 	bool drop_refcnt = false;
 	bool drop_tunnel = false;
+	bool new_session = false;
+	bool new_tunnel = false;
 	int ver = 2;
 	int fd;
 
@@ -701,6 +703,15 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 				.encap = L2TP_ENCAPTYPE_UDP,
 				.debug = 0,
 			};
 
+			/* Prevent l2tp_tunnel_register() from trying to set up
+			 * a kernel socket.
+			 */
+			if (fd < 0) {
+				error = -EBADF;
+				goto end;
+			}
+
 			error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
 			if (error < 0)
 				goto end;
@@ -713,6 +724,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 				goto end;
 			}
 			drop_tunnel = true;
+			new_tunnel = true;
 		}
 	} else {
 		/* Error if we can't find the tunnel */
@@ -734,6 +746,12 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 	session = l2tp_session_get(sock_net(sk), tunnel, session_id);
 	if (session) {
 		drop_refcnt = true;
+
+		if (session->pwtype != L2TP_PWTYPE_PPP) {
+			error = -EPROTOTYPE;
+			goto end;
+		}
+
 		ps = l2tp_session_priv(session);
 
 		/* Using a pre-existing session is fine as long as it hasn't
@@ -751,6 +769,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 		/* Default MTU must allow space for UDP/L2TP/PPP headers */
 		cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
 		cfg.mru = cfg.mtu;
+		cfg.pw_type = L2TP_PWTYPE_PPP;
 
 		session = l2tp_session_create(sizeof(struct pppol2tp_session),
 					      tunnel, session_id,
@@ -772,6 +791,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 			goto end;
 		}
 		drop_refcnt = true;
+		new_session = true;
 	}
 
 	/* Special case: if source & dest session_id == 0x0000, this
@@ -818,6 +838,12 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 		  session->name);
 
 end:
+	if (error) {
+		if (new_session)
+			l2tp_session_delete(session);
+		if (new_tunnel)
+			l2tp_tunnel_delete(tunnel);
+	}
 	if (drop_refcnt)
 		l2tp_session_dec_refcount(session);
 	if (drop_tunnel)
@@ -1175,7 +1201,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
 			session = l2tp_session_get(sock_net(sk), tunnel,
 						   stats.session_id);
 
-			if (session) {
+			if (session && session->pwtype == L2TP_PWTYPE_PPP) {
 				err = pppol2tp_session_ioctl(session, cmd,
 							     arg);
 				l2tp_session_dec_refcount(session);
@@ -1098,6 +1098,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
 	ieee80211_led_init(local);
 
+	result = ieee80211_txq_setup_flows(local);
+	if (result)
+		goto fail_flows;
+
 	rtnl_lock();
 
 	result = ieee80211_init_rate_ctrl_alg(local,
@@ -1120,10 +1124,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
 	rtnl_unlock();
 
-	result = ieee80211_txq_setup_flows(local);
-	if (result)
-		goto fail_flows;
-
 #ifdef CONFIG_INET
 	local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
 	result = register_inetaddr_notifier(&local->ifa_notifier);
@@ -1149,8 +1149,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 #if defined(CONFIG_INET) || defined(CONFIG_IPV6)
  fail_ifa:
 #endif
-	ieee80211_txq_teardown_flows(local);
- fail_flows:
 	rtnl_lock();
 	rate_control_deinitialize(local);
 	ieee80211_remove_interfaces(local);
@@ -1158,6 +1156,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	rtnl_unlock();
 	ieee80211_led_exit(local);
 	ieee80211_wep_free(local);
+	ieee80211_txq_teardown_flows(local);
+ fail_flows:
 	destroy_workqueue(local->workqueue);
 fail_workqueue:
 	wiphy_unregister(local->hw.wiphy);
@@ -1234,7 +1234,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
 	pr_debug("Create set %s with family %s\n",
 		 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
 
-#ifndef IP_SET_PROTO_UNDEF
+#ifdef IP_SET_PROTO_UNDEF
+	if (set->family != NFPROTO_UNSPEC)
+		return -IPSET_ERR_INVALID_FAMILY;
+#else
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
 		return -IPSET_ERR_INVALID_FAMILY;
 #endif
@@ -839,6 +839,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 		 *    For now only for NAT!
 		 */
 		ip_vs_rs_hash(ipvs, dest);
+		/* FTP-NAT requires conntrack for mangling */
+		if (svc->port == FTPPORT)
+			ip_vs_register_conntrack(svc);
 	}
 	atomic_set(&dest->conn_flags, conn_flags);
 
@@ -1462,6 +1465,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
  */
 static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup)
 {
+	ip_vs_unregister_conntrack(svc);
 	/* Hold svc to avoid double release from dest_trash */
 	atomic_inc(&svc->refcnt);
 	/*
@@ -168,7 +168,7 @@ static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
 						bool new_rt_is_local)
 {
 	bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
-	bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
+	bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL);
 	bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR);
 	bool source_is_loopback;
 	bool old_rt_is_local;
@@ -46,6 +46,7 @@
 struct nf_conncount_tuple {
 	struct hlist_node		node;
 	struct nf_conntrack_tuple	tuple;
+	struct nf_conntrack_zone	zone;
 };
 
 struct nf_conncount_rb {
@@ -80,7 +81,8 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
 }
 
 bool nf_conncount_add(struct hlist_head *head,
-		      const struct nf_conntrack_tuple *tuple)
+		      const struct nf_conntrack_tuple *tuple,
+		      const struct nf_conntrack_zone *zone)
 {
 	struct nf_conncount_tuple *conn;
 
@@ -88,6 +90,7 @@ bool nf_conncount_add(struct hlist_head *head,
 	if (conn == NULL)
 		return false;
 	conn->tuple = *tuple;
+	conn->zone = *zone;
 	hlist_add_head(&conn->node, head);
 	return true;
 }
@@ -108,7 +111,7 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 
 	/* check the saved connections */
 	hlist_for_each_entry_safe(conn, n, head, node) {
-		found = nf_conntrack_find_get(net, zone, &conn->tuple);
+		found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
 		if (found == NULL) {
 			hlist_del(&conn->node);
 			kmem_cache_free(conncount_conn_cachep, conn);
@@ -117,7 +120,8 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 
 		found_ct = nf_ct_tuplehash_to_ctrack(found);
 
-		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple)) {
+		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
+		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
 			/*
 			 * Just to be sure we have it only once in the list.
 			 * We should not see tuples twice unless someone hooks
@@ -196,7 +200,7 @@ count_tree(struct net *net, struct rb_root *root,
 	if (!addit)
 		return count;
 
-	if (!nf_conncount_add(&rbconn->hhead, tuple))
+	if (!nf_conncount_add(&rbconn->hhead, tuple, zone))
 		return 0; /* hotdrop */
 
 	return count + 1;
@@ -238,6 +242,7 @@ count_tree(struct net *net, struct rb_root *root,
 	}
 
 	conn->tuple = *tuple;
+	conn->zone = *zone;
 	memcpy(rbconn->key, key, sizeof(u32) * keylen);
 
 	INIT_HLIST_HEAD(&rbconn->hhead);
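Stepping back from the hunks above, a standalone sketch (stand-in types, not the kernel structures) of why the zone must be stored with each saved tuple: identical 5-tuples can legitimately coexist in different conntrack zones, so equality has to compare the (tuple, zone) pair rather than the tuple alone.

#include <stdbool.h>
#include <string.h>

struct tuple { unsigned int src, dst, proto; };	/* models nf_conntrack_tuple */
struct zone  { unsigned short id; };		/* models nf_conntrack_zone */

struct saved { struct tuple t; struct zone z; };

static bool same_conn(const struct saved *s,
		      const struct tuple *t, const struct zone *z)
{
	/* compare both keys: tuples alone collide across zones */
	return memcmp(&s->t, t, sizeof(*t)) == 0 && s->z.id == z->id;
}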
@@ -1446,7 +1446,8 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
 	}
 	nfnl_lock(NFNL_SUBSYS_CTNETLINK);
 	rcu_read_lock();
-	if (nat_hook->parse_nat_setup)
+	nat_hook = rcu_dereference(nf_nat_hook);
+	if (nat_hook)
 		return -EAGAIN;
 #endif
 	return -EOPNOTSUPP;
@@ -2890,12 +2890,13 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net,
 	u32 id = ntohl(nla_get_be32(nla));
 
 	list_for_each_entry(trans, &net->nft.commit_list, list) {
-		struct nft_set *set = nft_trans_set(trans);
+		if (trans->msg_type == NFT_MSG_NEWSET) {
+			struct nft_set *set = nft_trans_set(trans);
 
-		if (trans->msg_type == NFT_MSG_NEWSET &&
-		    id == nft_trans_set_id(trans) &&
-		    nft_active_genmask(set, genmask))
-			return set;
+			if (id == nft_trans_set_id(trans) &&
+			    nft_active_genmask(set, genmask))
+				return set;
+		}
 	}
 	return ERR_PTR(-ENOENT);
 }
@@ -5836,18 +5837,23 @@ static int nf_tables_flowtable_event(struct notifier_block *this,
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct nft_flowtable *flowtable;
 	struct nft_table *table;
+	struct net *net;
 
 	if (event != NETDEV_UNREGISTER)
 		return 0;
 
+	net = maybe_get_net(dev_net(dev));
+	if (!net)
+		return 0;
+
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_for_each_entry(table, &dev_net(dev)->nft.tables, list) {
+	list_for_each_entry(table, &net->nft.tables, list) {
 		list_for_each_entry(flowtable, &table->flowtables, list) {
 			nft_flowtable_event(event, dev, flowtable);
 		}
 	}
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+	put_net(net);
 	return NOTIFY_DONE;
 }
 
@@ -6438,7 +6444,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
 	kfree(trans);
 }
 
-static int nf_tables_abort(struct net *net, struct sk_buff *skb)
+static int __nf_tables_abort(struct net *net)
 {
 	struct nft_trans *trans, *next;
 	struct nft_trans_elem *te;
@@ -6554,6 +6560,11 @@ static void nf_tables_cleanup(struct net *net)
 	nft_validate_state_update(net, NFT_VALIDATE_SKIP);
 }
 
+static int nf_tables_abort(struct net *net, struct sk_buff *skb)
+{
+	return __nf_tables_abort(net);
+}
+
 static bool nf_tables_valid_genid(struct net *net, u32 genid)
 {
 	return net->nft.base_seq == genid;
@@ -7148,9 +7159,12 @@ static int __net_init nf_tables_init_net(struct net *net)
 
 static void __net_exit nf_tables_exit_net(struct net *net)
 {
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	if (!list_empty(&net->nft.commit_list))
+		__nf_tables_abort(net);
 	__nft_release_tables(net);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 	WARN_ON_ONCE(!list_empty(&net->nft.tables));
+	WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
 }
 
 static struct pernet_operations nf_tables_net_ops = {
@@ -7192,13 +7206,13 @@ static int __init nf_tables_module_init(void)
 
 static void __exit nf_tables_module_exit(void)
 {
-	unregister_pernet_subsys(&nf_tables_net_ops);
 	nfnetlink_subsys_unregister(&nf_tables_subsys);
 	unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
-	nft_chain_filter_fini();
+	unregister_pernet_subsys(&nf_tables_net_ops);
 	rcu_barrier();
 	nf_tables_core_module_exit();
 	kfree(info);
+	nft_chain_filter_fini();
 }
 
 module_init(nf_tables_module_init);
@@ -183,7 +183,8 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv)
 
 	switch (regs.verdict.code) {
 	case NFT_JUMP:
-		BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
+		if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
+			return NF_DROP;
 		jumpstack[stackptr].chain = chain;
 		jumpstack[stackptr].rules = rules + 1;
 		stackptr++;
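A standalone sketch of the pattern this hunk switches to: when a should-be-impossible condition is hit on the packet path, WARN_ON_ONCE()-style handling logs once and fails the single packet, where BUG_ON() would take down the whole machine.

#include <stdio.h>

#define MAX_DEPTH 16
enum verdict { ACCEPT, DROP };

static enum verdict push_jump(int *depth)
{
	if (*depth >= MAX_DEPTH) {	/* should be unreachable */
		static int warned;
		if (!warned++)
			fprintf(stderr, "jump stack overflow\n");
		return DROP;		/* drop one packet, not the machine */
	}
	(*depth)++;
	return ACCEPT;
}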
@@ -429,7 +429,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 			 */
 			if (err == -EAGAIN) {
 				status |= NFNL_BATCH_REPLAY;
-				goto next;
+				goto done;
 			}
 		}
 ack:
@@ -456,7 +456,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 			if (err)
 				status |= NFNL_BATCH_FAILURE;
 		}
-next:
+
 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
 		if (msglen > skb->len)
 			msglen = skb->len;
@@ -464,7 +464,11 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 	}
 done:
 	if (status & NFNL_BATCH_REPLAY) {
-		ss->abort(net, oskb);
+		const struct nfnetlink_subsystem *ss2;
+
+		ss2 = nfnl_dereference_protected(subsys_id);
+		if (ss2 == ss)
+			ss->abort(net, oskb);
 		nfnl_err_reset(&err_list);
 		nfnl_unlock(subsys_id);
 		kfree_skb(skb);
@@ -318,6 +318,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
 	    event != NETDEV_CHANGENAME)
 		return NOTIFY_DONE;
 
+	ctx.net = maybe_get_net(ctx.net);
+	if (!ctx.net)
+		return NOTIFY_DONE;
+
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
 	list_for_each_entry(table, &ctx.net->nft.tables, list) {
 		if (table->family != NFPROTO_NETDEV)
@@ -334,6 +338,7 @@ static int nf_tables_netdev_event(struct notifier_block *this,
 		}
 	}
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+	put_net(ctx.net);
 
 	return NOTIFY_DONE;
 }
@@ -52,7 +52,7 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
 	if (!addit)
 		goto out;
 
-	if (!nf_conncount_add(&priv->hhead, tuple_ptr)) {
+	if (!nf_conncount_add(&priv->hhead, tuple_ptr, zone)) {
 		regs->verdict.code = NF_DROP;
 		spin_unlock_bh(&priv->lock);
 		return;
@@ -203,9 +203,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 				goto err1;
 			set->ops->gc_init(set);
 		}
-
-	} else if (set->flags & NFT_SET_EVAL)
-		return -EINVAL;
+	}
 
 	nft_set_ext_prepare(&priv->tmpl);
 	nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen);
@@ -66,7 +66,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
 			parent = rcu_dereference_raw(parent->rb_left);
 			if (interval &&
 			    nft_rbtree_equal(set, this, interval) &&
-			    nft_rbtree_interval_end(this) &&
+			    nft_rbtree_interval_end(rbe) &&
 			    !nft_rbtree_interval_end(interval))
 				continue;
 			interval = rbe;
@@ -142,3 +142,4 @@ module_exit(nft_socket_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Máté Eckl");
 MODULE_DESCRIPTION("nf_tables socket match module");
+MODULE_ALIAS_NFT_EXPR("socket");
@@ -245,12 +245,22 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 	}
 
 	if (info->helper[0]) {
+		if (strnlen(info->helper, sizeof(info->helper)) == sizeof(info->helper)) {
+			ret = -ENAMETOOLONG;
+			goto err3;
+		}
+
 		ret = xt_ct_set_helper(ct, info->helper, par);
 		if (ret < 0)
 			goto err3;
 	}
 
 	if (info->timeout[0]) {
+		if (strnlen(info->timeout, sizeof(info->timeout)) == sizeof(info->timeout)) {
+			ret = -ENAMETOOLONG;
+			goto err4;
+		}
+
 		ret = xt_ct_set_timeout(ct, par, info->timeout);
 		if (ret < 0)
 			goto err4;
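A standalone sketch of the check both hunks add: a fixed-size name copied from userspace is only usable as a C string if a NUL terminator falls inside the buffer, and strnlen() returning the full buffer size means there is none. The buffer size parameter stands in for sizeof(info->helper) / sizeof(info->timeout).

#include <string.h>

static int name_is_terminated(const char *name, size_t bufsize)
{
	/* strnlen() == bufsize: no NUL inside the buffer -> reject */
	return strnlen(name, bufsize) < bufsize;
}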
@@ -211,7 +211,7 @@ static int __init connmark_mt_init(void)
 static void __exit connmark_mt_exit(void)
 {
 	xt_unregister_match(&connmark_mt_reg);
-	xt_unregister_target(connmark_tg_reg);
+	xt_unregister_targets(connmark_tg_reg, ARRAY_SIZE(connmark_tg_reg));
 }
 
 module_init(connmark_mt_init);
@@ -372,8 +372,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
-	    add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
-		add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
+	    add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
+		add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
 	if (info->del_set.index != IPSET_INVALID_ID)
@@ -407,8 +407,8 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
 
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
-	    add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
-		add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
+	    add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
+		add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
 	if (info->del_set.index != IPSET_INVALID_ID)
@@ -470,7 +470,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
 	}
 	if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
 	     (info->flags & IPSET_FLAG_MAP_SKBQUEUE)) &&
-	    !(par->hook_mask & (1 << NF_INET_FORWARD |
+	    (par->hook_mask & ~(1 << NF_INET_FORWARD |
 				1 << NF_INET_LOCAL_OUT |
 				1 << NF_INET_POST_ROUTING))) {
 		pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
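Worked arithmetic behind the new clamp (the IPSET_MAX_TIMEOUT definition here is an assumption, mirroring ip_set_timeout.h as (UINT_MAX >> 1) / MSEC_PER_SEC): the old bound of UINT_MAX / MSEC_PER_SEC still admits second values whose millisecond conversion exceeds UINT_MAX >> 1, the range the ipset timeout code can actually represent.

#include <stdio.h>

#define MSEC_PER_SEC		1000U
#define IPSET_MAX_TIMEOUT	((0xffffffffU >> 1) / MSEC_PER_SEC)	/* assumed definition */

int main(void)
{
	unsigned int old_bound = 0xffffffffU / MSEC_PER_SEC;	/* 4294967 s */

	/* 4294967 * 1000 = 4294967000: still fits in u32, but is larger
	 * than UINT_MAX >> 1 (2147483647), overflowing the usable range */
	printf("old bound in ms: %u\n", old_bound * MSEC_PER_SEC);
	printf("new bound: %u s\n", IPSET_MAX_TIMEOUT);		/* 2147483 s */
	return 0;
}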
@@ -193,4 +193,5 @@ struct rds_transport rds_loop_transport = {
 	.inc_copy_to_user	= rds_message_inc_copy_to_user,
 	.inc_free		= rds_loop_inc_free,
 	.t_name			= "loopback",
+	.t_type			= RDS_TRANS_LOOP,
 };
@@ -479,6 +479,11 @@ struct rds_notifier {
 	int			n_status;
 };
 
+/* Available as part of RDS core, so doesn't need to participate
+ * in get_preferred transport etc
+ */
+#define RDS_TRANS_LOOP	3
+
 /**
  * struct rds_transport -  transport specific behavioural hooks
  *
@@ -103,6 +103,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
 		rds_stats_add(s_recv_bytes_added_to_socket, delta);
 	else
 		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
+
+	/* loop transport doesn't send/recv congestion updates */
+	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
+		return;
+
 	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
 
 	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
@@ -409,6 +409,21 @@ static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
 	refcount_inc(&sk->sk_wmem_alloc);
 }
 
+static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
+{
+	if (SCTP_OUTPUT_CB(head)->last == head)
+		skb_shinfo(head)->frag_list = skb;
+	else
+		SCTP_OUTPUT_CB(head)->last->next = skb;
+	SCTP_OUTPUT_CB(head)->last = skb;
+
+	head->truesize += skb->truesize;
+	head->data_len += skb->len;
+	head->len += skb->len;
+
+	__skb_header_release(skb);
+}
+
 static int sctp_packet_pack(struct sctp_packet *packet,
 			    struct sk_buff *head, int gso, gfp_t gfp)
 {
@@ -422,7 +437,7 @@ static int sctp_packet_pack(struct sctp_packet *packet,
 
 	if (gso) {
 		skb_shinfo(head)->gso_type = sk->sk_gso_type;
-		NAPI_GRO_CB(head)->last = head;
+		SCTP_OUTPUT_CB(head)->last = head;
 	} else {
 		nskb = head;
 		pkt_size = packet->size;
@@ -503,15 +518,8 @@ static int sctp_packet_pack(struct sctp_packet *packet,
 					 &packet->chunk_list);
 		}
 
-		if (gso) {
-			if (skb_gro_receive(&head, nskb)) {
-				kfree_skb(nskb);
-				return 0;
-			}
-			if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
-					 sk->sk_gso_max_segs))
-				return 0;
-		}
+		if (gso)
+			sctp_packet_gso_append(head, nskb);
 
 		pkt_count++;
 	} while (!list_empty(&packet->chunk_list));
@@ -1273,8 +1273,7 @@ static __poll_t smc_accept_poll(struct sock *parent)
 	return mask;
 }
 
-static __poll_t smc_poll(struct file *file, struct socket *sock,
-			 poll_table *wait)
+static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
 {
 	struct sock *sk = sock->sk;
 	__poll_t mask = 0;
@@ -1290,7 +1289,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 	if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
 		/* delegate to CLC child sock */
 		release_sock(sk);
-		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
+		mask = smc->clcsock->ops->poll_mask(smc->clcsock, events);
 		lock_sock(sk);
 		sk->sk_err = smc->clcsock->sk->sk_err;
 		if (sk->sk_err) {
@@ -1308,11 +1307,6 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 			}
 		}
 	} else {
-		if (sk->sk_state != SMC_CLOSED) {
-			release_sock(sk);
-			sock_poll_wait(file, sk_sleep(sk), wait);
-			lock_sock(sk);
-		}
 		if (sk->sk_err)
 			mask |= EPOLLERR;
 		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -1625,7 +1619,7 @@ static const struct proto_ops smc_sock_ops = {
 	.socketpair	= sock_no_socketpair,
 	.accept		= smc_accept,
 	.getname	= smc_getname,
-	.poll		= smc_poll,
+	.poll_mask	= smc_poll_mask,
 	.ioctl		= smc_ioctl,
 	.listen		= smc_listen,
 	.shutdown	= smc_shutdown,
@@ -712,7 +712,7 @@ static int __init tls_register(void)
 	build_protos(tls_prots[TLSV4], &tcp_prot);
 
 	tls_sw_proto_ops = inet_stream_ops;
-	tls_sw_proto_ops.poll = tls_sw_poll;
+	tls_sw_proto_ops.poll_mask = tls_sw_poll_mask;
 	tls_sw_proto_ops.splice_read = tls_sw_splice_read;
 
 #ifdef CONFIG_TLS_DEVICE
@@ -191,18 +191,12 @@ static void tls_free_both_sg(struct sock *sk)
 }
 
 static int tls_do_encryption(struct tls_context *tls_ctx,
-			     struct tls_sw_context_tx *ctx, size_t data_len,
-			     gfp_t flags)
+			     struct tls_sw_context_tx *ctx,
+			     struct aead_request *aead_req,
+			     size_t data_len)
 {
-	unsigned int req_size = sizeof(struct aead_request) +
-		crypto_aead_reqsize(ctx->aead_send);
-	struct aead_request *aead_req;
 	int rc;
 
-	aead_req = kzalloc(req_size, flags);
-	if (!aead_req)
-		return -ENOMEM;
-
 	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
 	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
 
@@ -219,7 +213,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
 	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
 	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
 
-	kfree(aead_req);
 	return rc;
 }
 
@@ -228,8 +221,14 @@ static int tls_push_record(struct sock *sk, int flags,
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+	struct aead_request *req;
 	int rc;
 
+	req = kzalloc(sizeof(struct aead_request) +
+		      crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
+	if (!req)
+		return -ENOMEM;
+
 	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
 	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
 
@@ -245,15 +244,14 @@ static int tls_push_record(struct sock *sk, int flags,
 	tls_ctx->pending_open_record_frags = 0;
 	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
 
-	rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
-			       sk->sk_allocation);
+	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
 	if (rc < 0) {
 		/* If we are called from write_space and
 		 * we fail, we need to set this SOCK_NOSPACE
 		 * to trigger another write_space in the future.
 		 */
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-		return rc;
+		goto out_req;
 	}
 
 	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
@@ -268,6 +266,8 @@ static int tls_push_record(struct sock *sk, int flags,
 		tls_err_abort(sk, EBADMSG);
 
 	tls_advance_record_sn(sk, &tls_ctx->tx);
+out_req:
+	kfree(req);
 	return rc;
 }
 
@@ -754,7 +754,7 @@ int tls_sw_recvmsg(struct sock *sk,
 	struct sk_buff *skb;
 	ssize_t copied = 0;
 	bool cmsg = false;
-	int err = 0;
+	int target, err = 0;
 	long timeo;
 
 	flags |= nonblock;
@@ -764,6 +764,7 @@ int tls_sw_recvmsg(struct sock *sk,
 
 	lock_sock(sk);
 
+	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 	do {
 		bool zc = false;
@@ -856,6 +857,9 @@ int tls_sw_recvmsg(struct sock *sk,
 				goto recv_end;
 			}
 		}
+		/* If we have a new message from strparser, continue now. */
+		if (copied >= target && !ctx->recv_pkt)
+			break;
 	} while (len);
 
 recv_end:
@@ -915,23 +919,22 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
 	return copied ? : err;
 }
 
-unsigned int tls_sw_poll(struct file *file, struct socket *sock,
-			 struct poll_table_struct *wait)
+__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
 {
-	unsigned int ret;
 	struct sock *sk = sock->sk;
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+	__poll_t mask;
 
-	/* Grab POLLOUT and POLLHUP from the underlying socket */
-	ret = ctx->sk_poll(file, sock, wait);
+	/* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
+	mask = ctx->sk_poll_mask(sock, events);
 
-	/* Clear POLLIN bits, and set based on recv_pkt */
-	ret &= ~(POLLIN | POLLRDNORM);
+	/* Clear EPOLLIN bits, and set based on recv_pkt */
+	mask &= ~(EPOLLIN | EPOLLRDNORM);
 	if (ctx->recv_pkt)
-		ret |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
-	return ret;
+	return mask;
 }
 
 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -1188,7 +1191,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		sk->sk_data_ready = tls_data_ready;
 		write_unlock_bh(&sk->sk_callback_lock);
 
-		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
+		sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;
 
 		strp_check_rcv(&sw_ctx_rx->strp);
 	}
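A userspace sketch of the behavior the tls_sw_recvmsg() change restores: with MSG_WAITALL the call now loops until the whole request (the sock_rcvlowat() target) is satisfied, matching plain TCP semantics. fd is assumed to be a connected socket with kTLS receive offload already configured.

#include <sys/types.h>
#include <sys/socket.h>

static ssize_t read_exact(int fd, void *buf, size_t len)
{
	/* target = sock_rcvlowat(sk, 1, len) == len, so a short return
	 * now only happens on EOF, error, or a signal */
	return recv(fd, buf, len, MSG_WAITALL);
}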
@@ -1012,6 +1012,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
 	nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
 
 	list_del_rcu(&wdev->list);
+	synchronize_rcu();
 	rdev->devlist_generation++;
 
 	switch (wdev->iftype) {
@@ -1746,6 +1746,8 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
 	if (!rdev->ops->get_station)
 		return -EOPNOTSUPP;
 
+	memset(sinfo, 0, sizeof(*sinfo));
+
 	return rdev_get_station(rdev, dev, mac_addr, sinfo);
 }
 EXPORT_SYMBOL(cfg80211_get_station);
@@ -204,7 +204,8 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
 	long npgs;
 	int err;
 
-	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL);
+	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
+			    GFP_KERNEL | __GFP_NOWARN);
 	if (!umem->pgs)
 		return -ENOMEM;
 
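A standalone model of the allocation-flag change: umem->npgs is derived from a user-supplied size and the error path already returns -ENOMEM cleanly, so failure is an ordinary error, not something to log loudly — which is what __GFP_NOWARN expresses in the kernel.

#include <stdlib.h>

static int pin_pages(size_t npgs, void ***pgs)
{
	*pgs = calloc(npgs, sizeof(**pgs));	/* models kcalloc(..., GFP_KERNEL | __GFP_NOWARN) */
	if (!*pgs)
		return -1;			/* models -ENOMEM; caller just reports it */
	return 0;
}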
@@ -16,9 +16,7 @@ LDLIBS += -lcap -lelf -lrt -lpthread
 TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
 all: $(TEST_CUSTOM_PROGS)
 
-$(TEST_CUSTOM_PROGS): urandom_read
-
-urandom_read: urandom_read.c
+$(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
 	$(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id
 
 # Order correspond to 'make run_tests' order
@@ -568,7 +568,7 @@
         "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
         "matchCount": "1",
         "teardown": [
-            "$TC actions flush action skbedit"
+            "$TC actions flush action ife"
         ]
     },
     {