Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix rtlwifi crash, from Larry Finger.

 2) Memory disclosure in appletalk ipddp routing code, from Vlad
    Tsyrklevich.

 3) r8152 can erroneously split an RX packet into multiple URBs if the
    Rx FIFO is not empty when we suspend. Fix this by waiting for the
    FIFO to empty before suspending. From Hayes Wang.

 4) Two GRO fixes (enter slow path when not enough SKB tail room
    exists, disable frag0 optimizations when there are IPV6 extension
    headers) from Eric Dumazet and Herbert Xu.

 5) A series of mlx5e bug fixes (do source udp port offloading for
    tunnels properly, IP fragment matching fixes, handling firmware
    errors properly when installing TC rules, etc.) from Saeed
    Mahameed, Or Gerlitz, Roi Dayan, Hadar Hen Zion, Gil Rockah, and
    Daniel Jurgens.

 6) Two VRF fixes from David Ahern (don't skip multipath selection for
    VRF paths, disallow VRF to be configured with table ID 0).

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (35 commits)
  net: vrf: do not allow table id 0
  net: phy: marvell: fix Marvell 88E1512 used in SGMII mode
  sctp: Fix spelling mistake: "Atempt" -> "Attempt"
  net: ipv4: Fix multipath selection with vrf
  cgroup: move CONFIG_SOCK_CGROUP_DATA to init/Kconfig
  gro: use min_t() in skb_gro_reset_offset()
  net/mlx5: Only cancel recovery work when cleaning up device
  net/mlx5e: Remove WARN_ONCE from adaptive moderation code
  net/mlx5e: Un-register uplink representor on nic_disable
  net/mlx5e: Properly handle FW errors while adding TC rules
  net/mlx5e: Fix kbuild warnings for uninitialized parameters
  net/mlx5e: Set inline mode requirements for matching on IP fragments
  net/mlx5e: Properly get address type of encapsulation IP headers
  net/mlx5e: TC ipv4 tunnel encap offload error flow fixes
  net/mlx5e: Warn when rejecting offload attempts of IP tunnels
  net/mlx5e: Properly handle offloading of source udp port for IP tunnels
  gro: Disable frag0 optimization on IPv6 ext headers
  gro: Enter slow-path if there is no tailroom
  mlx4: Return EOPNOTSUPP instead of ENOTSUPP
  net/af_iucv: don't use paged skbs for TX on HiperSockets
  ...
commit cff3b2c4b3
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -190,7 +190,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 static int ipddp_create(struct ipddp_route *new_rt)
 {
-        struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
+        struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
 
         if (rt == NULL)
                 return -ENOMEM;
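
Note on the hunk above (item 2 in the summary): the route entry was allocated with kmalloc() and not every byte of it was written before the struct could be copied back to userspace, so stale kernel heap contents could leak; kzalloc() hands back zeroed memory. A minimal userspace analogue -- malloc/calloc standing in for kmalloc/kzalloc, with a made-up struct, not the kernel code itself:

#include <stdio.h>
#include <stdlib.h>

struct route_like {		/* hypothetical stand-in for struct ipddp_route */
	int dev_id;
	char pad[12];		/* bytes the code never writes explicitly */
};

int main(void)
{
	/* bug shape: malloc'd memory keeps whatever bytes were there before,
	 * so copying the whole struct out would disclose stale heap data */
	struct route_like *leaky = malloc(sizeof(*leaky));
	leaky->dev_id = 1;	/* pad[] left uninitialized */

	/* fix shape: calloc (like kzalloc) returns fully zeroed memory */
	struct route_like *safe = calloc(1, sizeof(*safe));
	safe->dev_id = 1;

	printf("calloc'd padding is zeroed: %d\n", safe->pad[0] == 0);
	free(leaky);
	free(safe);
	return 0;
}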
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2277,7 +2277,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 
 	if (priv->tx_ring_num[TX_XDP] &&
 	    !mlx4_en_check_xdp_mtu(dev, new_mtu))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	dev->mtu = new_mtu;
 
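
The one-liner above matters because ENOTSUPP (524) is a kernel-internal value that was never added to the userspace errno table, so applications see a meaningless code, while EOPNOTSUPP (95) is the documented UAPI errno. A quick check of how libc renders the two values (numeric values per Linux; the exact strings assume glibc):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 95 == EOPNOTSUPP (exported to userspace), 524 == ENOTSUPP (kernel-only) */
	printf("95:  %s\n", strerror(95));	/* "Operation not supported" */
	printf("524: %s\n", strerror(524));	/* "Unknown error 524" */
	return 0;
}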
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3675,14 +3675,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
 	mlx5e_vxlan_cleanup(priv);
 
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
-		mlx5_eswitch_unregister_vport_rep(esw, 0);
-
 	if (priv->xdp_prog)
 		bpf_prog_put(priv->xdp_prog);
 }
@@ -3807,9 +3801,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 {
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
 	queue_work(priv->wq, &priv->set_rx_mode_work);
+	if (MLX5_CAP_GEN(mdev, vport_group_manager))
+		mlx5_eswitch_unregister_vport_rep(esw, 0);
 	mlx5e_disable_async_events(priv);
-	mlx5_lag_remove(priv->mdev);
+	mlx5_lag_remove(mdev);
 }
 
 static const struct mlx5e_profile mlx5e_nic_profile = {
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
 	switch (am->tune_state) {
 	case MLX5E_AM_PARKING_ON_TOP:
 	case MLX5E_AM_PARKING_TIRED:
-		WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
 		return true;
 	case MLX5E_AM_GOING_RIGHT:
 		return (am->steps_left > 1) && (am->steps_right == 1);
@@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am)
 	switch (am->tune_state) {
 	case MLX5E_AM_PARKING_ON_TOP:
 	case MLX5E_AM_PARKING_TIRED:
-		WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
 		break;
 	case MLX5E_AM_GOING_RIGHT:
 		am->tune_state = MLX5E_AM_GOING_LEFT;
@@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am)
 	switch (am->tune_state) {
 	case MLX5E_AM_PARKING_ON_TOP:
 	case MLX5E_AM_PARKING_TIRED:
-		WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
 		break;
 	case MLX5E_AM_GOING_RIGHT:
 		if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
@@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
 	u32 delta_us = ktime_us_delta(end->time, start->time);
 	unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
 
-	if (!delta_us) {
-		WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+	if (!delta_us)
 		return;
-	}
 
 	curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
 	curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
 	}
 }
 
+/* we get here also when setting rule to the FW failed, etc. It means that the
+ * flow rule itself might not exist, but some offloading related to the actions
+ * should be cleaned.
+ */
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_fc *counter = NULL;
 
-	counter = mlx5_flow_rule_counter(flow->rule);
-
-	mlx5_del_flow_rules(flow->rule);
+	if (!IS_ERR(flow->rule)) {
+		counter = mlx5_flow_rule_counter(flow->rule);
+		mlx5_del_flow_rules(flow->rule);
+		mlx5_fc_destroy(priv->mdev, counter);
+	}
 
 	if (esw && esw->mode == SRIOV_OFFLOADS) {
 		mlx5_eswitch_del_vlan_action(esw, flow->attr);
@@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 		mlx5e_detach_encap(priv, flow);
 	}
 
-	mlx5_fc_destroy(priv->mdev, counter);
-
 	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
 		mlx5_destroy_flow_table(priv->fs.tc.t);
 		priv->fs.tc.t = NULL;
@@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				       outer_headers);
 
+	struct flow_dissector_key_control *enc_control =
+		skb_flow_dissector_target(f->dissector,
+					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
+					  f->key);
+
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
 		struct flow_dissector_key_ports *key =
 			skb_flow_dissector_target(f->dissector,
@@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 
 		/* Full udp dst port must be given */
 		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
-			return -EOPNOTSUPP;
-
-		/* udp src port isn't supported */
-		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
-			return -EOPNOTSUPP;
+			goto vxlan_match_offload_err;
 
 		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
 		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
 			parse_vxlan_attr(spec, f);
-		else
+		else {
+			netdev_warn(priv->netdev,
+				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
 			return -EOPNOTSUPP;
+		}
 
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 			 udp_dport, ntohs(mask->dst));
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 udp_dport, ntohs(key->dst));
 
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+			 udp_sport, ntohs(mask->src));
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+			 udp_sport, ntohs(key->src));
 	} else { /* udp dst port must be given */
+vxlan_match_offload_err:
+		netdev_warn(priv->netdev,
+			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
 		return -EOPNOTSUPP;
 	}
 
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
 		struct flow_dissector_key_ipv4_addrs *key =
 			skb_flow_dissector_target(f->dissector,
 						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
@@ -280,10 +295,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
 			 ntohl(key->dst));
-	}
 
-	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+	}
 
 	/* Enforce DMAC when offloading incoming tunneled flows.
 	 * Flow counters require a match on the DMAC.
@@ -346,6 +361,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			if (parse_tunnel_attr(priv, spec, f))
 				return -EOPNOTSUPP;
 			break;
+		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+			netdev_warn(priv->netdev,
+				    "IPv6 tunnel decap offload isn't supported\n");
 		default:
 			return -EOPNOTSUPP;
 		}
@@ -375,6 +393,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
 				 key->flags & FLOW_DIS_IS_FRAGMENT);
+
+			/* the HW doesn't need L3 inline to match on frag=no */
+			if (key->flags & FLOW_DIS_IS_FRAGMENT)
+				*min_inline = MLX5_INLINE_MODE_IP;
 		}
 	}
 
@@ -647,17 +669,14 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 
 #if IS_ENABLED(CONFIG_INET)
 	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
-	if (IS_ERR(rt)) {
-		pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
-		return -EOPNOTSUPP;
-	}
+	if (IS_ERR(rt))
+		return PTR_ERR(rt);
 #else
 	return -EOPNOTSUPP;
 #endif
 
 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-		pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
-			__func__);
+		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
 		ip_rt_put(rt);
 		return -EOPNOTSUPP;
 	}
@@ -718,12 +737,12 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 					  struct net_device **out_dev)
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	struct neighbour *n = NULL;
 	struct flowi4 fl4 = {};
-	struct neighbour *n;
 	char *encap_header;
 	int encap_size;
-	__be32 saddr;
-	int ttl;
+	__be32 saddr = 0;
+	int ttl = 0;
 	int err;
 
 	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
@@ -750,7 +769,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	e->out_dev = *out_dev;
 
 	if (!(n->nud_state & NUD_VALID)) {
-		err = -ENOTSUPP;
+		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+		err = -EOPNOTSUPP;
 		goto out;
 	}
 
@@ -772,6 +792,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
 			       encap_size, encap_header, &e->encap_id);
 out:
+	if (err && n)
+		neigh_release(n);
 	kfree(encap_header);
 	return err;
 }
@@ -792,9 +814,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	int tunnel_type;
 	int err;
 
-	/* udp dst port must be given */
+	/* udp dst port must be set */
 	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
+		goto vxlan_encap_offload_err;
+
+	/* setting udp src port isn't supported */
+	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
+vxlan_encap_offload_err:
+		netdev_warn(priv->netdev,
+			    "must set udp dst port and not set udp src port\n");
 		return -EOPNOTSUPP;
+	}
 
 	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
@@ -802,6 +832,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 		info.tun_id = tunnel_id_to_key32(key->tun_id);
 		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
 	} else {
+		netdev_warn(priv->netdev,
+			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
 		return -EOPNOTSUPP;
 	}
 
@@ -809,6 +841,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	case AF_INET:
 		info.daddr = key->u.ipv4.dst;
 		break;
+	case AF_INET6:
+		netdev_warn(priv->netdev,
+			    "IPv6 tunnel encap offload isn't supported\n");
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -986,7 +1021,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 
 	if (IS_ERR(flow->rule)) {
 		err = PTR_ERR(flow->rule);
-		goto err_free;
+		goto err_del_rule;
 	}
 
 	err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -997,7 +1032,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 	goto out;
 
 err_del_rule:
-	mlx5_del_flow_rules(flow->rule);
+	mlx5e_tc_del_flow(priv, flow);
 
 err_free:
 	kfree(flow);
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1195,6 +1195,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 {
 	int err = 0;
 
-	mlx5_drain_health_wq(dev);
+	if (cleanup)
+		mlx5_drain_health_wq(dev);
 
 	mutex_lock(&dev->intf_state_mutex);
@@ -1359,9 +1360,10 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
 	mlx5_enter_error_state(dev);
 	mlx5_unload_one(dev, priv, false);
-	/* In case of kernel call save the pci state */
+	/* In case of kernel call save the pci state and drain the health wq */
 	if (state) {
 		pci_save_state(pdev);
+		mlx5_drain_health_wq(dev);
 		mlx5_pci_disable_device(dev);
 	}
 
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -279,6 +279,7 @@ config MARVELL_PHY
 
 config MESON_GXL_PHY
 	tristate "Amlogic Meson GXL Internal PHY"
+	depends on ARCH_MESON || COMPILE_TEST
 	---help---
 	  Currently has a driver for the Amlogic Meson GXL Internal PHY
 
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1192,7 +1192,8 @@ static int marvell_read_status(struct phy_device *phydev)
 	int err;
 
 	/* Check the fiber mode first */
-	if (phydev->supported & SUPPORTED_FIBRE) {
+	if (phydev->supported & SUPPORTED_FIBRE &&
+	    phydev->interface != PHY_INTERFACE_MODE_SGMII) {
 		err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
 		if (err < 0)
 			goto error;
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1065,6 +1065,15 @@ void phy_state_machine(struct work_struct *work)
 			if (old_link != phydev->link)
 				phydev->state = PHY_CHANGELINK;
 		}
+		/*
+		 * Failsafe: check that nobody set phydev->link=0 between two
+		 * poll cycles, otherwise we won't leave RUNNING state as long
+		 * as link remains down.
+		 */
+		if (!phydev->link && phydev->state == PHY_RUNNING) {
+			phydev->state = PHY_CHANGELINK;
+			phydev_err(phydev, "no link in PHY_RUNNING\n");
+		}
 		break;
 	case PHY_CHANGELINK:
 		err = phy_read_status(phydev);
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3576,39 +3576,87 @@ static bool delay_autosuspend(struct r8152 *tp)
 		return false;
 }
 
-static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+static int rtl8152_rumtime_suspend(struct r8152 *tp)
 {
-	struct r8152 *tp = usb_get_intfdata(intf);
 	struct net_device *netdev = tp->netdev;
 	int ret = 0;
 
-	mutex_lock(&tp->control);
+	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+		u32 rcr = 0;
 
-	if (PMSG_IS_AUTO(message)) {
-		if (netif_running(netdev) && delay_autosuspend(tp)) {
+		if (delay_autosuspend(tp)) {
 			ret = -EBUSY;
 			goto out1;
 		}
 
-		set_bit(SELECTIVE_SUSPEND, &tp->flags);
-	} else {
-		netif_device_detach(netdev);
+		if (netif_carrier_ok(netdev)) {
+			u32 ocp_data;
+
+			rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+			ocp_data = rcr & ~RCR_ACPT_ALL;
+			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+			rxdy_gated_en(tp, true);
+			ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
+						 PLA_OOB_CTRL);
+			if (!(ocp_data & RXFIFO_EMPTY)) {
+				rxdy_gated_en(tp, false);
+				ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+				ret = -EBUSY;
+				goto out1;
+			}
+		}
+
+		clear_bit(WORK_ENABLE, &tp->flags);
+		usb_kill_urb(tp->intr_urb);
+
+		tp->rtl_ops.autosuspend_en(tp, true);
+
+		if (netif_carrier_ok(netdev)) {
+			napi_disable(&tp->napi);
+			rtl_stop_rx(tp);
+			rxdy_gated_en(tp, false);
+			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+			napi_enable(&tp->napi);
+		}
 	}
 
+	set_bit(SELECTIVE_SUSPEND, &tp->flags);
+
+out1:
+	return ret;
+}
+
+static int rtl8152_system_suspend(struct r8152 *tp)
+{
+	struct net_device *netdev = tp->netdev;
+	int ret = 0;
+
+	netif_device_detach(netdev);
+
 	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
 		clear_bit(WORK_ENABLE, &tp->flags);
 		usb_kill_urb(tp->intr_urb);
 		napi_disable(&tp->napi);
-		if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-			rtl_stop_rx(tp);
-			tp->rtl_ops.autosuspend_en(tp, true);
-		} else {
-			cancel_delayed_work_sync(&tp->schedule);
-			tp->rtl_ops.down(tp);
-		}
+		cancel_delayed_work_sync(&tp->schedule);
+		tp->rtl_ops.down(tp);
 		napi_enable(&tp->napi);
 	}
-out1:
+
+	return ret;
+}
+
+static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct r8152 *tp = usb_get_intfdata(intf);
+	int ret;
+
+	mutex_lock(&tp->control);
+
+	if (PMSG_IS_AUTO(message))
+		ret = rtl8152_rumtime_suspend(tp);
+	else
+		ret = rtl8152_system_suspend(tp);
+
 	mutex_unlock(&tp->control);
 
 	return ret;
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -263,7 +263,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 		.flowi4_iif = LOOPBACK_IFINDEX,
 		.flowi4_tos = RT_TOS(ip4h->tos),
 		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
+		.flowi4_proto = ip4h->protocol,
 		.daddr = ip4h->daddr,
+		.saddr = ip4h->saddr,
 	};
 	struct net *net = dev_net(vrf_dev);
 	struct rtable *rt;
@@ -1250,6 +1252,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
 		return -EINVAL;
 
 	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+	if (vrf->tb_id == RT_TABLE_UNSPEC)
+		return -EINVAL;
 
 	dev->priv_flags |= IFF_L3MDEV_MASTER;
 
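
The vrf_newlink() check above rejects table ID 0 because 0 is RT_TABLE_UNSPEC in the rtnetlink UAPI: a VRF bound to the unspecified table could never resolve to a real FIB table. A one-line confirmation of the constant (needs the Linux UAPI headers installed):

#include <stdio.h>
#include <linux/rtnetlink.h>

int main(void)
{
	printf("RT_TABLE_UNSPEC = %d\n", RT_TABLE_UNSPEC);	/* prints 0 */
	return 0;
}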
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -16,7 +16,7 @@
 /********************************************************************/
 int orinoco_mic_init(struct orinoco_private *priv)
 {
-	priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+	priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
 					      CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->tx_tfm_mic)) {
 		printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -25,7 +25,7 @@ int orinoco_mic_init(struct orinoco_private *priv)
 		return -ENOMEM;
 	}
 
-	priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+	priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
 					      CRYPTO_ALG_ASYNC);
 	if (IS_ERR(priv->rx_tfm_mic)) {
 		printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -40,17 +40,16 @@ int orinoco_mic_init(struct orinoco_private *priv)
 void orinoco_mic_free(struct orinoco_private *priv)
 {
 	if (priv->tx_tfm_mic)
-		crypto_free_ahash(priv->tx_tfm_mic);
+		crypto_free_shash(priv->tx_tfm_mic);
 	if (priv->rx_tfm_mic)
-		crypto_free_ahash(priv->rx_tfm_mic);
+		crypto_free_shash(priv->rx_tfm_mic);
 }
 
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
 		u8 *da, u8 *sa, u8 priority,
 		u8 *data, size_t data_len, u8 *mic)
 {
-	AHASH_REQUEST_ON_STACK(req, tfm_michael);
-	struct scatterlist sg[2];
+	SHASH_DESC_ON_STACK(desc, tfm_michael);
 	u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
 	int err;
 
@@ -67,18 +66,27 @@ int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
 	hdr[ETH_ALEN * 2 + 2] = 0;
 	hdr[ETH_ALEN * 2 + 3] = 0;
 
-	/* Use scatter gather to MIC header and data in one go */
-	sg_init_table(sg, 2);
-	sg_set_buf(&sg[0], hdr, sizeof(hdr));
-	sg_set_buf(&sg[1], data, data_len);
+	desc->tfm = tfm_michael;
+	desc->flags = 0;
 
-	if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN))
-		return -1;
+	err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
+	if (err)
+		return err;
+
+	err = crypto_shash_init(desc);
+	if (err)
+		return err;
+
+	err = crypto_shash_update(desc, hdr, sizeof(hdr));
+	if (err)
+		return err;
+
+	err = crypto_shash_update(desc, data, data_len);
+	if (err)
+		return err;
+
+	err = crypto_shash_final(desc, mic);
+	shash_desc_zero(desc);
 
-	ahash_request_set_tfm(req, tfm_michael);
-	ahash_request_set_callback(req, 0, NULL, NULL);
-	ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
-	err = crypto_ahash_digest(req);
-	ahash_request_zero(req);
 	return err;
 }
--- a/drivers/net/wireless/intersil/orinoco/mic.h
+++ b/drivers/net/wireless/intersil/orinoco/mic.h
@@ -6,6 +6,7 @@
 #define _ORINOCO_MIC_H_
 
 #include <linux/types.h>
+#include <crypto/hash.h>
 
 #define MICHAEL_MIC_LEN 8
 
@@ -15,7 +16,7 @@ struct crypto_ahash;
 
 int orinoco_mic_init(struct orinoco_private *priv);
 void orinoco_mic_free(struct orinoco_private *priv);
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
 		u8 *da, u8 *sa, u8 priority,
 		u8 *data, size_t data_len, u8 *mic);
 
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
@@ -152,8 +152,8 @@ struct orinoco_private {
 	u8 *wpa_ie;
 	int wpa_ie_len;
 
-	struct crypto_ahash *rx_tfm_mic;
-	struct crypto_ahash *tx_tfm_mic;
+	struct crypto_shash *rx_tfm_mic;
+	struct crypto_shash *tx_tfm_mic;
 
 	unsigned int wpa_enabled:1;
 	unsigned int tkip_cm_active:1;
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1063,6 +1063,7 @@ int rtl_usb_probe(struct usb_interface *intf,
 		return -ENOMEM;
 	}
 	rtlpriv = hw->priv;
+	rtlpriv->hw = hw;
 	rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
 				    GFP_KERNEL);
 	if (!rtlpriv->usb_data)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2477,14 +2477,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
 }
 
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->frag0 = NULL;
+	NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 					unsigned int offset)
 {
 	if (!pskb_may_pull(skb, hlen))
 		return NULL;
 
-	NAPI_GRO_CB(skb)->frag0 = NULL;
-	NAPI_GRO_CB(skb)->frag0_len = 0;
+	skb_gro_frag0_invalidate(skb);
 	return skb->data + offset;
 }
 
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1176,6 +1176,10 @@ config CGROUP_DEBUG
 
 	  Say N.
 
+config SOCK_CGROUP_DATA
+	bool
+	default n
+
 endif # CGROUPS
 
 config CHECKPOINT_RESTORE
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -258,10 +258,6 @@ config XPS
 config HWBM
 	bool
 
-config SOCK_CGROUP_DATA
-	bool
-	default n
-
 config CGROUP_NET_PRIO
 	bool "Network priority cgroup"
 	depends on CGROUPS
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4441,7 +4441,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
 	    pinfo->nr_frags &&
 	    !PageHighMem(skb_frag_page(frag0))) {
 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
-		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+						    skb_frag_size(frag0),
+						    skb->end - skb->tail);
 	}
 }
 
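
The clamp above is the core of "gro: Enter slow-path if there is no tailroom": frag0_len may not advertise more bytes than the skb's linear area has room for, so when headers spill past the tailroom the fast path bails out into skb_gro_header_slow() instead of reading past what it pulled. A standalone sketch of the arithmetic -- min_t() is reimplemented here and both sizes are made-up example values:

#include <stdio.h>

/* userspace stand-in for the kernel's min_t() macro */
#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	unsigned int frag0_size = 4096;	/* hypothetical first-fragment size */
	unsigned int tailroom = 128;	/* hypothetical skb->end - skb->tail */

	/* frag0_len is capped so the header fast path cannot overrun */
	printf("frag0_len = %u\n", min_t(unsigned int, frag0_size, tailroom));
	return 0;
}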
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -67,8 +67,8 @@ EXPORT_SYMBOL(skb_flow_dissector_init);
  * The function will try to retrieve a be32 entity at
  * offset poff
  */
-__be16 skb_flow_get_be16(const struct sk_buff *skb, int poff, void *data,
-			 int hlen)
+static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
+				void *data, int hlen)
 {
 	__be16 *u, _u;
 
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -222,7 +222,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
-  "sk_lock-AF_MAX"
+  "sk_lock-AF_QIPCRTR", "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -239,7 +239,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
   "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
-  "slock-AF_MAX"
+  "slock-AF_QIPCRTR", "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -256,7 +256,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
-  "clock-AF_MAX"
+  "clock-AF_QIPCRTR", "clock-AF_MAX"
 };
 
 /*
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -394,9 +394,11 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst)
 			return err;
 	}
 
-	err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
-	if (err)
-		return err;
+	if (dst->ds[0]) {
+		err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
+		if (err)
+			return err;
+	}
 
 	/* If we use a tagging format that doesn't have an ethertype
 	 * field, make sure that all packets from this point on get
@@ -433,6 +435,7 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
 		dsa_ds_unapply(dst, ds);
 	}
 
-	dsa_cpu_port_ethtool_restore(dst->ds[0]);
+	if (dst->ds[0])
+		dsa_cpu_port_ethtool_restore(dst->ds[0]);
 
 	pr_info("DSA: tree %d unapplied\n", dst->tree);
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1618,8 +1618,13 @@ void fib_select_multipath(struct fib_result *res, int hash)
 void fib_select_path(struct net *net, struct fib_result *res,
 		     struct flowi4 *fl4, int mp_hash)
 {
+	bool oif_check;
+
+	oif_check = (fl4->flowi4_oif == 0 ||
+		     fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
+	if (res->fi->fib_nhs > 1 && oif_check) {
 		if (mp_hash < 0)
 			mp_hash = get_hash_from_flowi4(fl4) >> 1;
 
@@ -1629,7 +1634,7 @@ void fib_select_path(struct net *net, struct fib_result *res,
 #endif
 	if (!res->prefixlen &&
 	    res->table->tb_num_default > 1 &&
-	    res->type == RTN_UNICAST && !fl4->flowi4_oif)
+	    res->type == RTN_UNICAST && oif_check)
 		fib_select_default(fl4, res);
 
 	if (!fl4->saddr)
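
These two hunks are the VRF multipath fix: with an l3mdev in play, flowi4_oif holds the VRF device's ifindex, so the old `flowi4_oif == 0` test always failed and multipath (and default-route) selection was skipped. FLOWI_FLAG_SKIP_NH_OIF marks exactly the lookups where the oif must not constrain nexthop choice, so it is folded into the check. A sketch of the new predicate -- the flag's bit value here is illustrative, not copied from linux/flow.h:

#include <stdio.h>
#include <stdbool.h>

#define FLOWI_FLAG_SKIP_NH_OIF 0x04	/* illustrative bit value */

static bool oif_check(int oif, unsigned int flags)
{
	return oif == 0 || (flags & FLOWI_FLAG_SKIP_NH_OIF);
}

int main(void)
{
	/* plain lookup, no oif bound: multipath allowed (old and new code) */
	printf("%d\n", oif_check(0, 0));			/* 1 */
	/* VRF lookup: oif set to the VRF ifindex but SKIP_NH_OIF present:
	 * multipath allowed again -- this is what the fix restores */
	printf("%d\n", oif_check(42, FLOWI_FLAG_SKIP_NH_OIF));	/* 1 */
	/* explicit device bind without the flag: still skipped */
	printf("%d\n", oif_check(42, 0));			/* 0 */
	return 0;
}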
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -951,7 +951,7 @@ static struct ctl_table ipv4_net_table[] = {
 		.data = &init_net.ipv4.sysctl_tcp_notsent_lowat,
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
-		.proc_handler = proc_dointvec,
+		.proc_handler = proc_douintvec,
 	},
 	{
 		.procname = "tcp_tw_reuse",
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -606,7 +606,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
 
 void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
 {
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -191,6 +191,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 		ops = rcu_dereference(inet6_offloads[proto]);
 		if (!ops || !ops->callbacks.gro_receive) {
 			__pskb_pull(skb, skb_gro_offset(skb));
+			skb_gro_frag0_invalidate(skb);
 			proto = ipv6_gso_pull_exthdrs(skb, proto);
 			skb_gro_pull(skb, -skb_transport_offset(skb));
 			skb_reset_transport_header(skb);
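
This one-liner is the consumer of the skb_gro_frag0_invalidate() helper added in the netdevice.h hunk earlier: __pskb_pull() rearranges the skb while IPv6 extension headers are stripped, so the cached frag0 view of the packet goes stale and must be dropped, or later GRO steps parse old bytes. A toy model of the pattern (the struct and sizes are invented; only the invalidate-on-mutation idea carries over):

#include <stdio.h>
#include <string.h>

struct gro_cb {			/* toy stand-in for NAPI_GRO_CB(skb) */
	const char *frag0;
	unsigned int frag0_len;
};

/* mirrors the new skb_gro_frag0_invalidate() helper */
static void frag0_invalidate(struct gro_cb *cb)
{
	cb->frag0 = NULL;
	cb->frag0_len = 0;
}

int main(void)
{
	char pkt[64] = "v6hdr|exthdr|tcp";
	struct gro_cb cb = { pkt, sizeof(pkt) };

	/* pulling headers reshapes the buffer; the cached view is stale */
	memmove(pkt, pkt + 6, sizeof(pkt) - 6);
	frag0_invalidate(&cb);

	printf("frag0 cleared: %d\n", cb.frag0 == NULL);
	return 0;
}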
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1464,7 +1464,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
 	struct fib6_node *fn;
 
 	/* Get the "current" route for this destination and
-	 * check if the redirect has come from approriate router.
+	 * check if the redirect has come from appropriate router.
 	 *
 	 * RFC 4861 specifies that redirects should only be
 	 * accepted if they come from the nexthop to the target.
@@ -2768,7 +2768,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
 	   old MTU is the lowest MTU in the path, update the route PMTU
 	   to reflect the increase. In this case if the other nodes' MTU
 	   also have the lowest MTU, TOO BIG MESSAGE will be lead to
-	   PMTU discouvery.
+	   PMTU discovery.
 	 */
 	if (rt->dst.dev == arg->dev &&
 	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1044,7 +1044,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv = iucv_sk(sk);
-	size_t headroom, linear;
+	size_t headroom = 0;
+	size_t linear;
 	struct sk_buff *skb;
 	struct iucv_message txmsg = {0};
 	struct cmsghdr *cmsg;
@@ -1122,19 +1123,21 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	 * this is fine for SOCK_SEQPACKET (unless we want to support
 	 * segmented records using the MSG_EOR flag), but
 	 * for SOCK_STREAM we might want to improve it in future */
-	headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
-		   ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
-	if (headroom + len < PAGE_SIZE) {
+	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+		headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
 		linear = len;
 	} else {
-		/* In nonlinear "classic" iucv skb,
-		 * reserve space for iucv_array
-		 */
-		if (iucv->transport != AF_IUCV_TRANS_HIPER)
-			headroom += sizeof(struct iucv_array) *
-				    (MAX_SKB_FRAGS + 1);
-		linear = PAGE_SIZE - headroom;
+		if (len < PAGE_SIZE) {
+			linear = len;
+		} else {
+			/* In nonlinear "classic" iucv skb,
+			 * reserve space for iucv_array
+			 */
+			headroom = sizeof(struct iucv_array) *
+				   (MAX_SKB_FRAGS + 1);
+			linear = PAGE_SIZE - headroom;
+		}
 	}
 	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
 				   noblock, &err, 0);
 	if (!skb)
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
 	const int pkt_len = 20;
 	struct qrtr_hdr *hdr;
 	struct sk_buff *skb;
-	u32 *buf;
+	__le32 *buf;
 
 	skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
 	if (!skb)
@@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
 	hdr->dst_node_id = cpu_to_le32(dst_node);
 	hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
 
-	buf = (u32 *)skb_put(skb, pkt_len);
+	buf = (__le32 *)skb_put(skb, pkt_len);
 	memset(buf, 0, pkt_len);
 	buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
 	buf[1] = cpu_to_le32(src_node);
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			     (new_transport->state == SCTP_PF)))
 				new_transport = asoc->peer.active_path;
 			if (new_transport->state == SCTP_UNCONFIRMED) {
-				WARN_ONCE(1, "Atempt to send packet on unconfirmed path.");
+				WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
 				sctp_chunk_fail(chunk, 0);
 				sctp_chunk_free(chunk);
 				continue;
--- a/net/socket.c
+++ b/net/socket.c
@@ -533,7 +533,7 @@ static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
 	return used;
 }
 
-int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
 {
 	int err = simple_setattr(dentry, iattr);
 