Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:

 1) Use 32-bit index for tail calls in s390 bpf JIT, from Ilya Leoshkevich.

 2) Fix missed EPOLLOUT events in TCP, from Eric Dumazet. Same fix for SMC from Jason Baron.

 3) ipv6_mc_may_pull() should return 0 for malformed packets, not -EINVAL. From Stefano Brivio.

 4) Don't forget to unpin umem xdp pages in error path of xdp_umem_reg(). From Ivan Khoronzhuk.

 5) Fix sta object leak in mac80211, from Johannes Berg.

 6) Fix regression by not configuring PHYLINK on CPU port of bcm_sf2 switches. From Florian Fainelli.

 7) Revert DMA sync removal from r8169 which was causing regressions on some MIPS Loongson platforms. From Heiner Kallweit.

 8) Use after free in flow dissector, from Jakub Sitnicki.

 9) Fix NULL derefs of net devices during ICMP processing across collect_md tunnels, from Hangbin Liu.

10) proto_register() memory leaks, from Zhang Lin.

11) Set NLM_F_MULTI flag in multipart netlink messages consistently, from John Fastabend.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (66 commits)
  r8152: Set memory to all 0xFFs on failed reg reads
  openvswitch: Fix conntrack cache with timeout
  ipv4: mpls: fix mpls_xmit for iptunnel
  nexthop: Fix nexthop_num_path for blackhole nexthops
  net: rds: add service level support in rds-info
  net: route dump netlink NLM_F_MULTI flag missing
  s390/qeth: reject oversized SNMP requests
  sock: fix potential memory leak in proto_register()
  MAINTAINERS: Add phylink keyword to SFF/SFP/SFP+ MODULE SUPPORT
  xfrm/xfrm_policy: fix dst dev null pointer dereference in collect_md mode
  ipv4/icmp: fix rt dst dev null pointer dereference
  openvswitch: Fix log message in ovs conntrack
  bpf: allow narrow loads of some sk_reuseport_md fields with offset > 0
  bpf: fix use after free in prog symbol exposure
  bpf: fix precision tracking in presence of bpf2bpf calls
  flow_dissector: Fix potential use-after-free on BPF_PROG_DETACH
  Revert "r8169: remove not needed call to dma_sync_single_for_device"
  ipv6: propagate ipv6_add_dev's error returns out of ipv6_find_idev
  net/ncsi: Fix the payload copying for the request coming from Netlink
  qed: Add cleanup in qed_slowpath_start()
  ...
commit 452a04441b
@@ -8454,11 +8454,6 @@ S: Maintained
 F: fs/io_uring.c
 F: include/uapi/linux/io_uring.h
 
-IP MASQUERADING
-M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
-S: Maintained
-F: net/ipv4/netfilter/ipt_MASQUERADE.c
-
 IPMI SUBSYSTEM
 M: Corey Minyard <minyard@acm.org>
 L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -11086,7 +11081,7 @@ NET_FAILOVER MODULE
 M: Sridhar Samudrala <sridhar.samudrala@intel.com>
 L: netdev@vger.kernel.org
 S: Supported
-F: driver/net/net_failover.c
+F: drivers/net/net_failover.c
 F: include/net/net_failover.h
 F: Documentation/networking/net_failover.rst
 
@@ -14478,6 +14473,7 @@ F: drivers/net/phy/phylink.c
 F: drivers/net/phy/sfp*
 F: include/linux/phylink.h
 F: include/linux/sfp.h
+K: phylink
 
 SGI GRU DRIVER
 M: Dimitri Sivanich <sivanich@sgi.com>
@@ -863,7 +863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
        break;
    case BPF_ALU64 | BPF_NEG: /* dst = -dst */
        /* lcgr %dst,%dst */
-       EMIT4(0xb9130000, dst_reg, dst_reg);
+       EMIT4(0xb9030000, dst_reg, dst_reg);
        break;
    /*
     * BPF_FROM_BE/LE
@@ -1049,8 +1049,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
        /* llgf %w1,map.max_entries(%b2) */
        EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
                      offsetof(struct bpf_array, map.max_entries));
-       /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
-       EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+       /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+       EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
                          REG_W1, 0, 0xa);
 
        /*
@@ -1076,8 +1076,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
         * goto out;
         */
 
-       /* sllg %r1,%b3,3: %r1 = index * 8 */
-       EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+       /* llgfr %r1,%b3: %r1 = (u32) index */
+       EMIT4(0xb9160000, REG_1, BPF_REG_3);
+       /* sllg %r1,%r1,3: %r1 *= 8 */
+       EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
        /* lg %r1,prog(%b2,%r1) */
        EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
                      REG_1, offsetof(struct bpf_array, ptrs));
@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
      make the card work).
 
 config ATM_NICSTAR_USE_IDT77105
-   bool "Use IDT77015 PHY driver (25Mbps)"
+   bool "Use IDT77105 PHY driver (25Mbps)"
    depends on ATM_NICSTAR
    help
      Support for the PHYsical layer chip in ForeRunner LE25 cards. In
@@ -478,6 +478,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
                unsigned long *supported,
                struct phylink_link_state *state)
 {
+   struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
    __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
    if (!phy_interface_mode_is_rgmii(state->interface) &&
@@ -487,8 +488,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
        state->interface != PHY_INTERFACE_MODE_INTERNAL &&
        state->interface != PHY_INTERFACE_MODE_MOCA) {
        bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-       dev_err(ds->dev,
-           "Unsupported interface: %d\n", state->interface);
+       if (port != core_readl(priv, CORE_IMP0_PRT_ID))
+           dev_err(ds->dev,
+               "Unsupported interface: %d for port %d\n",
+               state->interface, port);
        return;
    }
 
@@ -526,6 +529,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
    u32 id_mode_dis = 0, port_mode;
    u32 reg, offset;
 
+   if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+       return;
+
    if (priv->type == BCM7445_DEVICE_ID)
        offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
    else
@@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
                u64_stats_fetch_begin(&priv->tx[ring].statss);
            s->tx_packets += priv->tx[ring].pkt_done;
            s->tx_bytes += priv->tx[ring].bytes_done;
-       } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+       } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                           start));
    }
 }
@ -109,13 +109,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
|
||||||
|
|
||||||
static void tx_fill_wi(struct mlx5e_txqsq *sq,
|
static void tx_fill_wi(struct mlx5e_txqsq *sq,
|
||||||
u16 pi, u8 num_wqebbs,
|
u16 pi, u8 num_wqebbs,
|
||||||
skb_frag_t *resync_dump_frag)
|
skb_frag_t *resync_dump_frag,
|
||||||
|
u32 num_bytes)
|
||||||
{
|
{
|
||||||
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
|
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
|
||||||
|
|
||||||
wi->skb = NULL;
|
wi->skb = NULL;
|
||||||
wi->num_wqebbs = num_wqebbs;
|
wi->num_wqebbs = num_wqebbs;
|
||||||
wi->resync_dump_frag = resync_dump_frag;
|
wi->resync_dump_frag = resync_dump_frag;
|
||||||
|
wi->num_bytes = num_bytes;
|
||||||
}
|
}
|
||||||
|
|
||||||
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
|
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
|
||||||
|
@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq,
|
||||||
|
|
||||||
umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
|
umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
|
||||||
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
|
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
|
||||||
tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL);
|
tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
|
||||||
sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
|
sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
|
||||||
|
|
||||||
wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
|
wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
|
||||||
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
|
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
|
||||||
tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL);
|
tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
|
||||||
sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
|
sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
|
||||||
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
|
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct mlx5e_dump_wqe {
|
||||||
|
struct mlx5_wqe_ctrl_seg ctrl;
|
||||||
|
struct mlx5_wqe_data_seg data;
|
||||||
|
};
|
||||||
|
|
||||||
static int
|
static int
|
||||||
tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||||
skb_frag_t *frag, u32 tisn, bool first)
|
skb_frag_t *frag, u32 tisn, bool first)
|
||||||
{
|
{
|
||||||
struct mlx5_wqe_ctrl_seg *cseg;
|
struct mlx5_wqe_ctrl_seg *cseg;
|
||||||
struct mlx5_wqe_eth_seg *eseg;
|
|
||||||
struct mlx5_wqe_data_seg *dseg;
|
struct mlx5_wqe_data_seg *dseg;
|
||||||
struct mlx5e_tx_wqe *wqe;
|
struct mlx5e_dump_wqe *wqe;
|
||||||
dma_addr_t dma_addr = 0;
|
dma_addr_t dma_addr = 0;
|
||||||
u16 ds_cnt, ds_cnt_inl;
|
|
||||||
u8 num_wqebbs;
|
u8 num_wqebbs;
|
||||||
u16 pi, ihs;
|
u16 ds_cnt;
|
||||||
int fsz;
|
int fsz;
|
||||||
|
u16 pi;
|
||||||
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
|
|
||||||
ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
|
|
||||||
ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
|
|
||||||
ds_cnt += ds_cnt_inl;
|
|
||||||
ds_cnt += 1; /* one frag */
|
|
||||||
|
|
||||||
wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
|
wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
|
||||||
|
|
||||||
|
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
|
||||||
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
|
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
|
||||||
|
|
||||||
cseg = &wqe->ctrl;
|
cseg = &wqe->ctrl;
|
||||||
eseg = &wqe->eth;
|
dseg = &wqe->data;
|
||||||
dseg = wqe->data;
|
|
||||||
|
|
||||||
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
|
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
|
||||||
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
|
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
|
||||||
cseg->tisn = cpu_to_be32(tisn << 8);
|
cseg->tisn = cpu_to_be32(tisn << 8);
|
||||||
cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
|
cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
|
||||||
|
|
||||||
eseg->inline_hdr.sz = cpu_to_be16(ihs);
|
|
||||||
memcpy(eseg->inline_hdr.start, skb->data, ihs);
|
|
||||||
dseg += ds_cnt_inl;
|
|
||||||
|
|
||||||
fsz = skb_frag_size(frag);
|
fsz = skb_frag_size(frag);
|
||||||
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
|
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
|
@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||||
dseg->byte_count = cpu_to_be32(fsz);
|
dseg->byte_count = cpu_to_be32(fsz);
|
||||||
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
|
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
|
||||||
|
|
||||||
tx_fill_wi(sq, pi, num_wqebbs, frag);
|
tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
|
||||||
sq->pc += num_wqebbs;
|
sq->pc += num_wqebbs;
|
||||||
|
|
||||||
WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
|
WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
|
||||||
|
@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
|
||||||
struct mlx5_wq_cyc *wq = &sq->wq;
|
struct mlx5_wq_cyc *wq = &sq->wq;
|
||||||
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
|
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
|
||||||
|
|
||||||
tx_fill_wi(sq, pi, 1, NULL);
|
tx_fill_wi(sq, pi, 1, NULL, 0);
|
||||||
|
|
||||||
mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
|
mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
|
||||||
}
|
}
|
||||||
|
|
|
@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
|
||||||
data_size = crdump_size - offset;
|
data_size = crdump_size - offset;
|
||||||
else
|
else
|
||||||
data_size = MLX5_CR_DUMP_CHUNK_SIZE;
|
data_size = MLX5_CR_DUMP_CHUNK_SIZE;
|
||||||
err = devlink_fmsg_binary_put(fmsg, cr_data, data_size);
|
err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
|
||||||
|
data_size);
|
||||||
if (err)
|
if (err)
|
||||||
goto free_data;
|
goto free_data;
|
||||||
}
|
}
|
||||||
|
@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t)
|
||||||
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
|
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
fatal_error = check_fatal_sensors(dev);
|
||||||
|
|
||||||
|
if (fatal_error && !health->fatal_error) {
|
||||||
|
mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
|
||||||
|
dev->priv.health.fatal_error = fatal_error;
|
||||||
|
print_health_info(dev);
|
||||||
|
mlx5_trigger_health_work(dev);
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
count = ioread32be(health->health_counter);
|
count = ioread32be(health->health_counter);
|
||||||
if (count == health->prev)
|
if (count == health->prev)
|
||||||
++health->miss_counter;
|
++health->miss_counter;
|
||||||
|
@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t)
|
||||||
if (health->synd && health->synd != prev_synd)
|
if (health->synd && health->synd != prev_synd)
|
||||||
queue_work(health->wq, &health->report_work);
|
queue_work(health->wq, &health->report_work);
|
||||||
|
|
||||||
fatal_error = check_fatal_sensors(dev);
|
|
||||||
|
|
||||||
if (fatal_error && !health->fatal_error) {
|
|
||||||
mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
|
|
||||||
dev->priv.health.fatal_error = fatal_error;
|
|
||||||
print_health_info(dev);
|
|
||||||
mlx5_trigger_health_work(dev);
|
|
||||||
}
|
|
||||||
|
|
||||||
out:
|
out:
|
||||||
mod_timer(&health->timer, get_next_poll_jiffies());
|
mod_timer(&health->timer, get_next_poll_jiffies());
|
||||||
}
|
}
|
||||||
|
|
|
@@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data,
        break;
    case OCELOT_ACL_ACTION_TRAP:
        VCAP_ACT_SET(PORT_MASK, 0x0);
-       VCAP_ACT_SET(MASK_MODE, 0x0);
+       VCAP_ACT_SET(MASK_MODE, 0x1);
        VCAP_ACT_SET(POLICE_ENA, 0x0);
        VCAP_ACT_SET(POLICE_IDX, 0x0);
        VCAP_ACT_SET(CPU_QU_NUM, 0x0);
@@ -1416,6 +1416,13 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 
    switch (f->command) {
    case FLOW_BLOCK_BIND:
+       cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+       if (cb_priv &&
+           flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
+                     cb_priv,
+                     &nfp_block_cb_list))
+           return -EBUSY;
+
        cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
        if (!cb_priv)
            return -ENOMEM;
@@ -1325,7 +1325,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
                              &drv_version);
        if (rc) {
            DP_NOTICE(cdev, "Failed sending drv version command\n");
-           return rc;
+           goto err4;
        }
    }
 
@@ -1333,6 +1333,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 
    return 0;
 
+err4:
+   qed_ll2_dealloc_if(cdev);
 err3:
    qed_hw_stop(cdev);
 err2:
@@ -5921,6 +5921,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
    skb = napi_alloc_skb(&tp->napi, pkt_size);
    if (skb)
        skb_copy_to_linear_data(skb, data, pkt_size);
+   dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
 
    return skb;
 }
@@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev)
    if (!cpsw)
        return -ENOMEM;
 
+   platform_set_drvdata(pdev, cpsw);
    cpsw->dev = dev;
 
    mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
@@ -2879,7 +2880,6 @@ static int cpsw_probe(struct platform_device *pdev)
        goto clean_cpts;
    }
 
-   platform_set_drvdata(pdev, cpsw);
    priv = netdev_priv(ndev);
    priv->cpsw = cpsw;
    priv->ndev = ndev;
@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
        err = hwsim_subscribe_all_others(phy);
        if (err < 0) {
            mutex_unlock(&hwsim_phys_lock);
-           goto err_reg;
+           goto err_subscribe;
        }
    }
    list_add_tail(&phy->list, &hwsim_phys);
@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 
    return idx;
 
+err_subscribe:
+   ieee802154_unregister_hw(phy->hw);
 err_reg:
    kfree(pib);
 err_pib:
@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
    return 0;
 
 platform_drv:
-   genl_unregister_family(&hwsim_genl_family);
-platform_dev:
    platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+   genl_unregister_family(&hwsim_genl_family);
    return rc;
 }
 
@@ -799,8 +799,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
    ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
                  RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
                  value, index, tmp, size, 500);
+   if (ret < 0)
+       memset(data, 0xff, size);
+   else
+       memcpy(data, tmp, size);
 
-   memcpy(data, tmp, size);
    kfree(tmp);
 
    return ret;
@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
|
||||||
cpu_to_le32(vif->bss_conf.use_short_slot ?
|
cpu_to_le32(vif->bss_conf.use_short_slot ?
|
||||||
MAC_FLG_SHORT_SLOT : 0);
|
MAC_FLG_SHORT_SLOT : 0);
|
||||||
|
|
||||||
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
|
cmd->filter_flags = 0;
|
||||||
|
|
||||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||||
u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
|
u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
|
||||||
|
@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
|
||||||
/* We need the dtim_period to set the MAC as associated */
|
/* We need the dtim_period to set the MAC as associated */
|
||||||
if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
|
if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
|
||||||
!force_assoc_off) {
|
!force_assoc_off) {
|
||||||
|
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||||
|
u8 ap_sta_id = mvmvif->ap_sta_id;
|
||||||
u32 dtim_offs;
|
u32 dtim_offs;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
|
||||||
dtim_offs);
|
dtim_offs);
|
||||||
|
|
||||||
ctxt_sta->is_assoc = cpu_to_le32(1);
|
ctxt_sta->is_assoc = cpu_to_le32(1);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* allow multicast data frames only as long as the station is
|
||||||
|
* authorized, i.e., GTK keys are already installed (if needed)
|
||||||
|
*/
|
||||||
|
if (ap_sta_id < IWL_MVM_STATION_COUNT) {
|
||||||
|
struct ieee80211_sta *sta;
|
||||||
|
|
||||||
|
rcu_read_lock();
|
||||||
|
|
||||||
|
sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
|
||||||
|
if (!IS_ERR_OR_NULL(sta)) {
|
||||||
|
struct iwl_mvm_sta *mvmsta =
|
||||||
|
iwl_mvm_sta_from_mac80211(sta);
|
||||||
|
|
||||||
|
if (mvmsta->sta_state ==
|
||||||
|
IEEE80211_STA_AUTHORIZED)
|
||||||
|
cmd.filter_flags |=
|
||||||
|
cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
|
||||||
|
}
|
||||||
|
|
||||||
|
rcu_read_unlock();
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
ctxt_sta->is_assoc = cpu_to_le32(0);
|
ctxt_sta->is_assoc = cpu_to_le32(0);
|
||||||
|
|
||||||
|
@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
|
||||||
MAC_FILTER_IN_CONTROL_AND_MGMT |
|
MAC_FILTER_IN_CONTROL_AND_MGMT |
|
||||||
MAC_FILTER_IN_BEACON |
|
MAC_FILTER_IN_BEACON |
|
||||||
MAC_FILTER_IN_PROBE_REQUEST |
|
MAC_FILTER_IN_PROBE_REQUEST |
|
||||||
MAC_FILTER_IN_CRC32);
|
MAC_FILTER_IN_CRC32 |
|
||||||
|
MAC_FILTER_ACCEPT_GRP);
|
||||||
ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
|
ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
|
||||||
|
|
||||||
/* Allocate sniffer station */
|
/* Allocate sniffer station */
|
||||||
|
@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
|
||||||
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
|
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
|
||||||
|
|
||||||
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
|
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
|
||||||
MAC_FILTER_IN_PROBE_REQUEST);
|
MAC_FILTER_IN_PROBE_REQUEST |
|
||||||
|
MAC_FILTER_ACCEPT_GRP);
|
||||||
|
|
||||||
/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */
|
/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */
|
||||||
cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
|
cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
|
||||||
|
|
|
@ -3327,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
|
||||||
/* enable beacon filtering */
|
/* enable beacon filtering */
|
||||||
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
|
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Now that the station is authorized, i.e., keys were already
|
||||||
|
* installed, need to indicate to the FW that
|
||||||
|
* multicast data frames can be forwarded to the driver
|
||||||
|
*/
|
||||||
|
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
|
||||||
|
|
||||||
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
|
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
|
||||||
true);
|
true);
|
||||||
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
|
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
|
||||||
new_state == IEEE80211_STA_ASSOC) {
|
new_state == IEEE80211_STA_ASSOC) {
|
||||||
|
/* Multicast data frames are no longer allowed */
|
||||||
|
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
|
||||||
|
|
||||||
/* disable beacon filtering */
|
/* disable beacon filtering */
|
||||||
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
|
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
|
||||||
WARN_ON(ret &&
|
WARN_ON(ret &&
|
||||||
|
|
|
@ -1063,6 +1063,23 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||||
else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
|
else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
|
||||||
iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
|
iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* same thing for QuZ... */
|
||||||
|
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
|
||||||
|
if (cfg == &iwl_ax101_cfg_qu_hr)
|
||||||
|
cfg = &iwl_ax101_cfg_quz_hr;
|
||||||
|
else if (cfg == &iwl_ax201_cfg_qu_hr)
|
||||||
|
cfg = &iwl_ax201_cfg_quz_hr;
|
||||||
|
else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
|
||||||
|
cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
|
||||||
|
else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
|
||||||
|
cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
|
||||||
|
else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
|
||||||
|
cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
|
||||||
|
else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
|
||||||
|
cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
|
||||||
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
pci_set_drvdata(pdev, iwl_trans);
|
pci_set_drvdata(pdev, iwl_trans);
|
||||||
|
|
|
@@ -3603,6 +3603,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
    } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
           ((trans->cfg != &iwl_ax200_cfg_cc &&
+            trans->cfg != &iwl_ax201_cfg_qu_hr &&
             trans->cfg != &killer1650x_2ax_cfg &&
             trans->cfg != &killer1650w_2ax_cfg &&
             trans->cfg != &iwl_ax201_cfg_quz_hr) ||
@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
|
||||||
u16 len = byte_cnt;
|
u16 len = byte_cnt;
|
||||||
__le16 bc_ent;
|
__le16 bc_ent;
|
||||||
|
|
||||||
if (trans_pcie->bc_table_dword)
|
if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
|
||||||
len = DIV_ROUND_UP(len, 4);
|
|
||||||
|
|
||||||
if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
|
|
||||||
return;
|
return;
|
||||||
|
|
||||||
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
|
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
|
||||||
|
@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
|
||||||
*/
|
*/
|
||||||
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
|
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
|
||||||
|
|
||||||
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
|
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
|
||||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
|
/* Starting from 22560, the HW expects bytes */
|
||||||
|
WARN_ON(trans_pcie->bc_table_dword);
|
||||||
|
WARN_ON(len > 0x3FFF);
|
||||||
|
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
|
||||||
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
|
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
|
||||||
else
|
} else {
|
||||||
|
/* Until 22560, the HW expects DW */
|
||||||
|
WARN_ON(!trans_pcie->bc_table_dword);
|
||||||
|
len = DIV_ROUND_UP(len, 4);
|
||||||
|
WARN_ON(len > 0xFFF);
|
||||||
|
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
|
||||||
scd_bc_tbl->tfd_offset[idx] = bc_ent;
|
scd_bc_tbl->tfd_offset[idx] = bc_ent;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = {
    .release_buffered_frames = mt76_release_buffered_frames,
 };
 
-static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
 {
    int err;
 
-   mt76x0_chip_onoff(dev, true, true);
+   mt76x0_chip_onoff(dev, true, reset);
 
    if (!mt76x02_wait_for_mac(&dev->mt76))
        return -ETIMEDOUT;
@@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
    if (err < 0)
        goto out_err;
 
-   err = mt76x0u_init_hardware(dev);
+   err = mt76x0u_init_hardware(dev, true);
    if (err < 0)
        goto out_err;
 
@@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
    if (ret < 0)
        goto err;
 
-   ret = mt76x0u_init_hardware(dev);
+   ret = mt76x0u_init_hardware(dev, false);
    if (ret)
        goto err;
 
@@ -6094,6 +6094,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
            rt2800_delete_wcid_attr(rt2x00dev, i);
    }
 
+   /*
+    * Clear encryption initialization vectors on start, but keep them
+    * for watchdog reset. Otherwise we will have wrong IVs and not be
+    * able to keep connections after reset.
+    */
+   if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
+       for (i = 0; i < 256; i++)
+           rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
+
    /*
     * Clear all beacons
     */
@@ -658,6 +658,7 @@ enum rt2x00_state_flags {
    DEVICE_STATE_ENABLED_RADIO,
    DEVICE_STATE_SCANNING,
    DEVICE_STATE_FLUSHING,
+   DEVICE_STATE_RESET,
 
    /*
     * Driver configuration
@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
|
||||||
|
|
||||||
int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
|
int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
|
||||||
{
|
{
|
||||||
int retval;
|
int retval = 0;
|
||||||
|
|
||||||
if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
|
if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
|
||||||
/*
|
/*
|
||||||
* This is special case for ieee80211_restart_hw(), otherwise
|
* This is special case for ieee80211_restart_hw(), otherwise
|
||||||
* mac80211 never call start() two times in row without stop();
|
* mac80211 never call start() two times in row without stop();
|
||||||
*/
|
*/
|
||||||
|
set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
|
||||||
rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
|
rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
|
||||||
rt2x00lib_stop(rt2x00dev);
|
rt2x00lib_stop(rt2x00dev);
|
||||||
}
|
}
|
||||||
|
@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
|
||||||
*/
|
*/
|
||||||
retval = rt2x00lib_load_firmware(rt2x00dev);
|
retval = rt2x00lib_load_firmware(rt2x00dev);
|
||||||
if (retval)
|
if (retval)
|
||||||
return retval;
|
goto out;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Initialize the device.
|
* Initialize the device.
|
||||||
*/
|
*/
|
||||||
retval = rt2x00lib_initialize(rt2x00dev);
|
retval = rt2x00lib_initialize(rt2x00dev);
|
||||||
if (retval)
|
if (retval)
|
||||||
return retval;
|
goto out;
|
||||||
|
|
||||||
rt2x00dev->intf_ap_count = 0;
|
rt2x00dev->intf_ap_count = 0;
|
||||||
rt2x00dev->intf_sta_count = 0;
|
rt2x00dev->intf_sta_count = 0;
|
||||||
|
@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
|
||||||
/* Enable the radio */
|
/* Enable the radio */
|
||||||
retval = rt2x00lib_enable_radio(rt2x00dev);
|
retval = rt2x00lib_enable_radio(rt2x00dev);
|
||||||
if (retval)
|
if (retval)
|
||||||
return retval;
|
goto out;
|
||||||
|
|
||||||
set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
|
set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
|
||||||
|
|
||||||
return 0;
|
out:
|
||||||
|
clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
|
||||||
|
return retval;
|
||||||
}
|
}
|
||||||
|
|
||||||
void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
|
void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
|
||||||
|
|
|
@@ -4374,6 +4374,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
        get_user(req_len, &ureq->hdr.req_len))
        return -EFAULT;
 
+   /* Sanitize user input, to avoid overflows in iob size calculation: */
+   if (req_len > QETH_BUFSIZE)
+       return -EINVAL;
+
    iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
    if (!iob)
        return -ENOMEM;
@@ -4,6 +4,9 @@
  * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
  */
 
+#ifndef _NF_CONNTRACK_H323_TYPES_H
+#define _NF_CONNTRACK_H323_TYPES_H
+
 typedef struct TransportAddress_ipAddress { /* SEQUENCE */
    int options;        /* No use */
    unsigned int ip;
@@ -931,3 +934,5 @@ typedef struct RasMessage { /* CHOICE */
        InfoRequestResponse infoRequestResponse;
    };
 } RasMessage;
+
+#endif /* _NF_CONNTRACK_H323_TYPES_H */
@@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb,
                       unsigned int len)
 {
    if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
-       return -EINVAL;
+       return 0;
 
    return pskb_may_pull(skb, len);
 }
@@ -52,7 +52,7 @@ struct bpf_prog;
 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 
 struct net {
-   refcount_t      passive;    /* To decided when the network
+   refcount_t      passive;    /* To decide when the network
                         * namespace should be freed.
                         */
    refcount_t      count;      /* To decided when the network
@@ -141,12 +141,6 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
 
        nh_grp = rcu_dereference_rtnl(nh->nh_grp);
        rc = nh_grp->num_nh;
-   } else {
-       const struct nh_info *nhi;
-
-       nhi = rcu_dereference_rtnl(nh->nh_info);
-       if (nhi->reject_nh)
-           rc = 0;
    }
 
    return rc;
@@ -233,7 +233,7 @@ void rt_del_uncached_list(struct rtable *rt);
 
 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
               u32 table_id, struct fib_info *fi,
-              int *fa_index, int fa_start);
+              int *fa_index, int fa_start, unsigned int flags);
 
 static inline void ip_rt_put(struct rtable *rt)
 {
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
    struct nf_acct  *nfacct;
 };
 
+struct xt_nfacct_match_info_v1 {
+   char        name[NFACCT_NAME_MAX];
+   struct nf_acct  *nfacct __attribute__((aligned(8)));
+};
+
 #endif /* _XT_NFACCT_MATCH_H */
@@ -250,6 +250,7 @@ struct rds_info_rdma_connection {
    __u32       rdma_mr_max;
    __u32       rdma_mr_size;
    __u8        tos;
+   __u8        sl;
    __u32       cache_allocs;
 };
 
@@ -265,6 +266,7 @@ struct rds6_info_rdma_connection {
    __u32       rdma_mr_max;
    __u32       rdma_mr_size;
    __u8        tos;
+   __u8        sl;
    __u32       cache_allocs;
 };
 
@@ -1707,20 +1707,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
    if (err)
        goto free_used_maps;
 
-   err = bpf_prog_new_fd(prog);
-   if (err < 0) {
-       /* failed to allocate fd.
-        * bpf_prog_put() is needed because the above
-        * bpf_prog_alloc_id() has published the prog
-        * to the userspace and the userspace may
-        * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
-        */
-       bpf_prog_put(prog);
-       return err;
-   }
+   /* Upon success of bpf_prog_alloc_id(), the BPF prog is
+    * effectively publicly exposed. However, retrieving via
+    * bpf_prog_get_fd_by_id() will take another reference,
+    * therefore it cannot be gone underneath us.
+    *
+    * Only for the time /after/ successful bpf_prog_new_fd()
+    * and before returning to userspace, we might just hold
+    * one reference and any parallel close on that fd could
+    * rip everything out. Hence, below notifications must
+    * happen before bpf_prog_new_fd().
+    *
+    * Also, any failure handling from this point onwards must
+    * be using bpf_prog_put() given the program is exposed.
+    */
    bpf_prog_kallsyms_add(prog);
    perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
 
+   err = bpf_prog_new_fd(prog);
+   if (err < 0)
+       bpf_prog_put(prog);
    return err;
 
 free_used_maps:
@@ -985,9 +985,6 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
    reg->smax_value = S64_MAX;
    reg->umin_value = 0;
    reg->umax_value = U64_MAX;
-
-   /* constant backtracking is enabled for root only for now */
-   reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
 }
 
 /* Mark a register as having a completely unknown (scalar) value. */
@@ -1014,7 +1011,11 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
        __mark_reg_not_init(regs + regno);
        return;
    }
-   __mark_reg_unknown(regs + regno);
+   regs += regno;
+   __mark_reg_unknown(regs);
+   /* constant backtracking is enabled for root without bpf2bpf calls */
+   regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
+           true : false;
 }
 
 static void __mark_reg_not_init(struct bpf_reg_state *reg)
@@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
 {
    struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
 
-   return attr ? nla_get_u32(attr) : 0;
+   return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
 }
 
 /**
@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
            return NF_DROP;
        }
 
-       ADD_COUNTER(*(counter_base + i), 1, skb->len);
+       ADD_COUNTER(*(counter_base + i), skb->len, 1);
 
        /* these should only watch: not modify, nor tell us
         * what to do with the packet
@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
            continue;
        counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
        for (i = 0; i < nentries; i++)
-           ADD_COUNTER(counters[i], counter_base[i].pcnt,
-                   counter_base[i].bcnt);
+           ADD_COUNTER(counters[i], counter_base[i].bcnt,
+                   counter_base[i].pcnt);
    }
 }
 
@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name,
 
    /* we add to the counters of the first cpu */
    for (i = 0; i < num_counters; i++)
-       ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt);
+       ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
 
    write_unlock_bh(&t->lock);
    ret = 0;
@@ -8757,13 +8757,13 @@ sk_reuseport_is_valid_access(int off, int size,
        return size == size_default;
 
    /* Fields that allow narrowing */
-   case offsetof(struct sk_reuseport_md, eth_protocol):
+   case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
        if (size < FIELD_SIZEOF(struct sk_buff, protocol))
            return false;
        /* fall through */
-   case offsetof(struct sk_reuseport_md, ip_protocol):
-   case offsetof(struct sk_reuseport_md, bind_inany):
-   case offsetof(struct sk_reuseport_md, len):
+   case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
+   case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
+   case bpf_ctx_range(struct sk_reuseport_md, len):
        bpf_ctx_record_field_size(info, size_default);
        return bpf_ctx_narrow_access_ok(off, size, size_default);
 
@@ -142,8 +142,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
        mutex_unlock(&flow_dissector_mutex);
        return -ENOENT;
    }
-   bpf_prog_put(attached);
    RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
+   bpf_prog_put(attached);
    mutex_unlock(&flow_dissector_mutex);
    return 0;
 }
@ -3287,16 +3287,17 @@ static __init int net_inuse_init(void)
|
||||||
|
|
||||||
core_initcall(net_inuse_init);
|
core_initcall(net_inuse_init);
|
||||||
|
|
||||||
static void assign_proto_idx(struct proto *prot)
|
static int assign_proto_idx(struct proto *prot)
|
||||||
{
|
{
|
||||||
prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
|
prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
|
||||||
|
|
||||||
if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
|
if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
|
||||||
pr_err("PROTO_INUSE_NR exhausted\n");
|
pr_err("PROTO_INUSE_NR exhausted\n");
|
||||||
return;
|
return -ENOSPC;
|
||||||
}
|
}
|
||||||
|
|
||||||
set_bit(prot->inuse_idx, proto_inuse_idx);
|
set_bit(prot->inuse_idx, proto_inuse_idx);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void release_proto_idx(struct proto *prot)
|
static void release_proto_idx(struct proto *prot)
|
||||||
|
@ -3305,8 +3306,9 @@ static void release_proto_idx(struct proto *prot)
|
||||||
clear_bit(prot->inuse_idx, proto_inuse_idx);
|
clear_bit(prot->inuse_idx, proto_inuse_idx);
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
static inline void assign_proto_idx(struct proto *prot)
|
static inline int assign_proto_idx(struct proto *prot)
|
||||||
{
|
{
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void release_proto_idx(struct proto *prot)
|
static inline void release_proto_idx(struct proto *prot)
|
||||||
|
@ -3355,6 +3357,8 @@ static int req_prot_init(const struct proto *prot)
|
||||||
|
|
||||||
int proto_register(struct proto *prot, int alloc_slab)
|
int proto_register(struct proto *prot, int alloc_slab)
|
||||||
{
|
{
|
||||||
|
int ret = -ENOBUFS;
|
||||||
|
|
||||||
if (alloc_slab) {
|
if (alloc_slab) {
|
||||||
prot->slab = kmem_cache_create_usercopy(prot->name,
|
prot->slab = kmem_cache_create_usercopy(prot->name,
|
||||||
prot->obj_size, 0,
|
prot->obj_size, 0,
|
||||||
|
@ -3391,20 +3395,27 @@ int proto_register(struct proto *prot, int alloc_slab)
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_lock(&proto_list_mutex);
|
mutex_lock(&proto_list_mutex);
|
||||||
|
ret = assign_proto_idx(prot);
|
||||||
|
if (ret) {
|
||||||
|
mutex_unlock(&proto_list_mutex);
|
||||||
|
goto out_free_timewait_sock_slab_name;
|
||||||
|
}
|
||||||
list_add(&prot->node, &proto_list);
|
list_add(&prot->node, &proto_list);
|
||||||
assign_proto_idx(prot);
|
|
||||||
mutex_unlock(&proto_list_mutex);
|
mutex_unlock(&proto_list_mutex);
|
||||||
return 0;
|
return ret;
|
||||||
|
|
||||||
out_free_timewait_sock_slab_name:
|
out_free_timewait_sock_slab_name:
|
||||||
kfree(prot->twsk_prot->twsk_slab_name);
|
if (alloc_slab && prot->twsk_prot)
|
||||||
|
kfree(prot->twsk_prot->twsk_slab_name);
|
||||||
out_free_request_sock_slab:
|
out_free_request_sock_slab:
|
||||||
req_prot_cleanup(prot->rsk_prot);
|
if (alloc_slab) {
|
||||||
|
req_prot_cleanup(prot->rsk_prot);
|
||||||
|
|
||||||
kmem_cache_destroy(prot->slab);
|
kmem_cache_destroy(prot->slab);
|
||||||
prot->slab = NULL;
|
prot->slab = NULL;
|
||||||
|
}
|
||||||
out:
|
out:
|
||||||
return -ENOBUFS;
|
return ret;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(proto_register);
|
EXPORT_SYMBOL(proto_register);
|
||||||
|
|
||||||
|
|
|
@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
    int err = 0;
    long vm_wait = 0;
    long current_timeo = *timeo_p;
-   bool noblock = (*timeo_p ? false : true);
    DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
    if (sk_stream_memory_free(sk))
@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
            goto do_error;
-       if (!*timeo_p) {
-           if (noblock)
-               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-           goto do_nonblock;
-       }
+       if (!*timeo_p)
+           goto do_eagain;
        if (signal_pending(current))
            goto do_interrupted;
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -169,7 +165,13 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 do_error:
    err = -EPIPE;
    goto out;
-do_nonblock:
+do_eagain:
+   /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
+    * be generated later.
+    * When TCP receives ACK packets that make room, tcp_check_space()
+    * only calls tcp_new_space() if SOCK_NOSPACE is set.
+    */
+   set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
    err = -EAGAIN;
    goto out;
 do_interrupted:
@@ -1092,7 +1092,7 @@ static struct packet_type ieee802154_packet_type = {
 
 static int __init af_ieee802154_init(void)
 {
-   int rc = -EINVAL;
+   int rc;
 
    rc = proto_register(&ieee802154_raw_prot, 1);
    if (rc)
@@ -2145,7 +2145,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
 
        if (filter->dump_exceptions) {
            err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
                         &i_fa, s_fa);
-                        &i_fa, s_fa);
+                        &i_fa, s_fa, flags);
            if (err < 0)
                goto stop;
        }
@@ -582,7 +582,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
 
    if (!rt)
        goto out;
-   net = dev_net(rt->dst.dev);
+
+   if (rt->dst.dev)
+       net = dev_net(rt->dst.dev);
+   else if (skb_in->dev)
+       net = dev_net(skb_in->dev);
+   else
+       goto out;
 
    /*
     *  Find the original header. It is expected to be valid, of course.
@@ -902,7 +908,7 @@ static bool icmp_redirect(struct sk_buff *skb)
        return false;
    }
 
-   icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
+   icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
    return true;
 }
 
@@ -1475,7 +1475,7 @@ EXPORT_SYMBOL(__ip_mc_inc_group);
 
 void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 {
-   __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
+   __ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
 }
 EXPORT_SYMBOL(ip_mc_inc_group);
 
@@ -2197,7 +2197,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
    iml->sflist = NULL;
    iml->sfmode = mode;
    rcu_assign_pointer(inet->mc_list, iml);
-   __ip_mc_inc_group(in_dev, addr, mode);
+   ____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
    err = 0;
 done:
    return err;
@@ -2728,7 +2728,8 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
 /* called with rcu_read_lock held */
 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
-			struct sk_buff *skb, u32 portid, u32 seq)
+			struct sk_buff *skb, u32 portid, u32 seq,
+			unsigned int flags)
 {
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
@@ -2736,7 +2737,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 	u32 error;
 	u32 metrics[RTAX_MAX];

-	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
+	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
 	if (!nlh)
 		return -EMSGSIZE;

@@ -2860,7 +2861,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
 			    struct netlink_callback *cb, u32 table_id,
 			    struct fnhe_hash_bucket *bucket, int genid,
-			    int *fa_index, int fa_start)
+			    int *fa_index, int fa_start, unsigned int flags)
 {
 	int i;

@@ -2891,7 +2892,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
 			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
 					   table_id, NULL, skb,
 					   NETLINK_CB(cb->skb).portid,
-					   cb->nlh->nlmsg_seq);
+					   cb->nlh->nlmsg_seq, flags);
 			if (err)
 				return err;
 next:
@@ -2904,7 +2905,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,

 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
 		       u32 table_id, struct fib_info *fi,
-		       int *fa_index, int fa_start)
+		       int *fa_index, int fa_start, unsigned int flags)
 {
 	struct net *net = sock_net(cb->skb->sk);
 	int nhsel, genid = fnhe_genid(net);
@@ -2922,7 +2923,8 @@ int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
 		err = 0;
 		if (bucket)
 			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
-					       genid, fa_index, fa_start);
+					       genid, fa_index, fa_start,
+					       flags);
 		rcu_read_unlock();
 		if (err)
 			return err;
@@ -3183,7 +3185,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 				    fl4.flowi4_tos, res.fi, 0);
 	} else {
 		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
-				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
+				   NETLINK_CB(in_skb).portid,
+				   nlh->nlmsg_seq, 0);
 	}
 	if (err < 0)
 		goto errout_rcu;
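For context, the flags value threaded through rt_fill_info() above lands in the netlink header via nlmsg_put(), so route-dump replies can carry NLM_F_MULTI while a single RTM_GETROUTE answer does not. A rough, standalone userspace sketch (illustrative only, not part of the patch) of how a receiver tells a multipart dump apart from a single reply:

	#include <linux/netlink.h>
	#include <stdio.h>

	/* Walk a buffer of netlink messages received from a route dump.
	 * Messages flagged NLM_F_MULTI keep coming until NLMSG_DONE arrives.
	 */
	static void walk_route_dump(void *buf, int len)
	{
		struct nlmsghdr *nlh;

		for (nlh = buf; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE)
				break;	/* end of a multipart dump */
			if (nlh->nlmsg_flags & NLM_F_MULTI)
				printf("dump part, seq %u\n", nlh->nlmsg_seq);
			else
				printf("single reply, seq %u\n", nlh->nlmsg_seq);
		}
	}

With the fix, dumped routes consistently set NLM_F_MULTI, so strict userspace parsers no longer mistake an intermediate dump message for the final answer.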
@@ -478,7 +478,7 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
 	if (!idev) {
 		idev = ipv6_add_dev(dev);
 		if (IS_ERR(idev))
-			return NULL;
+			return idev;
 	}

 	if (dev->flags&IFF_UP)
@@ -1045,7 +1045,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
 	int err = 0;

 	if (addr_type == IPV6_ADDR_ANY ||
-	    addr_type & IPV6_ADDR_MULTICAST ||
+	    (addr_type & IPV6_ADDR_MULTICAST &&
+	     !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
 	    (!(idev->dev->flags & IFF_LOOPBACK) &&
 	     !netif_is_l3_master(idev->dev) &&
 	     addr_type & IPV6_ADDR_LOOPBACK))
@@ -2465,8 +2466,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
 	ASSERT_RTNL();

 	idev = ipv6_find_idev(dev);
-	if (!idev)
-		return ERR_PTR(-ENOBUFS);
+	if (IS_ERR(idev))
+		return idev;

 	if (idev->cnf.disable_ipv6)
 		return ERR_PTR(-EACCES);
@@ -3158,7 +3159,7 @@ static void init_loopback(struct net_device *dev)
 	ASSERT_RTNL();

 	idev = ipv6_find_idev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
@@ -3373,7 +3374,7 @@ static void addrconf_sit_config(struct net_device *dev)
 	 */

 	idev = ipv6_find_idev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
@@ -3398,7 +3399,7 @@ static void addrconf_gre_config(struct net_device *dev)
 	ASSERT_RTNL();

 	idev = ipv6_find_idev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
@@ -4772,8 +4773,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 			 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;

 	idev = ipv6_find_idev(dev);
-	if (!idev)
-		return -ENOBUFS;
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);

 	if (!ipv6_allow_optimistic_dad(net, idev))
 		cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
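The addrconf hunks above switch ipv6_find_idev() callers from bare NULL checks to the kernel's ERR_PTR convention, so the real error from ipv6_add_dev() propagates instead of being flattened to -ENOBUFS. A simplified, self-contained sketch of that idiom (the helpers are re-implemented here only for illustration; the real ones live in <linux/err.h>):

	#include <stdio.h>
	#include <errno.h>

	/* Minimal stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
	static inline void *ERR_PTR(long error)     { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-4095;
	}

	static int the_object;	/* stand-in for a successfully found device */

	static void *find_or_add(int fail)
	{
		if (fail)
			return ERR_PTR(-ENOMEM);	/* keep the real reason */
		return &the_object;
	}

	int main(void)
	{
		void *idev = find_or_add(1);

		if (IS_ERR(idev))	/* caller sees -ENOMEM, not a bare NULL */
			printf("error: %ld\n", PTR_ERR(idev));
		return 0;
	}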
@@ -1546,6 +1546,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 	if (is_multicast_ether_addr(mac))
 		return -EINVAL;

+	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+	    sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    !sdata->u.mgd.associated)
+		return -EINVAL;
+
 	sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
 	if (!sta)
 		return -ENOMEM;
@@ -1553,10 +1558,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
 		sta->sta.tdls = true;

-	if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    !sdata->u.mgd.associated)
-		return -EINVAL;
-
 	err = sta_apply_parameters(local, sta, params);
 	if (err) {
 		sta_info_free(local, sta);
@@ -133,12 +133,12 @@ static int mpls_xmit(struct sk_buff *skb)
 	mpls_stats_inc_outucastpkts(out_dev, skb);

 	if (rt) {
-		if (rt->rt_gw_family == AF_INET)
-			err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
-					 skb);
-		else if (rt->rt_gw_family == AF_INET6)
+		if (rt->rt_gw_family == AF_INET6)
 			err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
 					 skb);
+		else
+			err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
+					 skb);
 	} else if (rt6) {
 		if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
 			/* 6PE (RFC 4798) */
@@ -54,7 +54,7 @@ static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h,
 	checksum = ncsi_calculate_checksum((unsigned char *)h,
 					   sizeof(*h) + nca->payload);
 	pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) +
-		    nca->payload);
+		    ALIGN(nca->payload, 4));
 	*pchecksum = htonl(checksum);
 }

@@ -309,14 +309,21 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)

 int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
 {
-	struct ncsi_request *nr;
-	struct ethhdr *eh;
 	struct ncsi_cmd_handler *nch = NULL;
+	struct ncsi_request *nr;
+	unsigned char type;
+	struct ethhdr *eh;
 	int i, ret;

+	/* Use OEM generic handler for Netlink request */
+	if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN)
+		type = NCSI_PKT_CMD_OEM;
+	else
+		type = nca->type;
+
 	/* Search for the handler */
 	for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) {
-		if (ncsi_cmd_handlers[i].type == nca->type) {
+		if (ncsi_cmd_handlers[i].type == type) {
 			if (ncsi_cmd_handlers[i].handler)
 				nch = &ncsi_cmd_handlers[i];
 			else
@@ -47,7 +47,8 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
 	if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
 	    ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
 		netdev_dbg(nr->ndp->ndev.dev,
-			   "NCSI: non zero response/reason code\n");
+			   "NCSI: non zero response/reason code %04xh, %04xh\n",
+			    ntohs(h->code), ntohs(h->reason));
 		return -EPERM;
 	}

@@ -55,7 +56,7 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
 	 * sender doesn't support checksum according to NCSI
 	 * specification.
 	 */
-	pchecksum = (__be32 *)((void *)(h + 1) + payload - 4);
+	pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4);
 	if (ntohl(*pchecksum) == 0)
 		return 0;

@@ -63,7 +64,9 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
 					   sizeof(*h) + payload - 4);

 	if (*pchecksum != htonl(checksum)) {
-		netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n");
+		netdev_dbg(nr->ndp->ndev.dev,
+			   "NCSI: checksum mismatched; recd: %08x calc: %08x\n",
+			   *pchecksum, htonl(checksum));
 		return -EINVAL;
 	}
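Both ALIGN() changes above reflect the NCSI packet layout, where the 4-byte checksum sits after a payload that is itself padded out to a 4-byte boundary; without the rounding, odd payload sizes (such as Netlink-driven passthrough requests) place the checksum at the wrong offset. A small standalone sketch of the same offset arithmetic (the macro name here is ours; the kernel uses its own ALIGN()):

	#include <stdio.h>
	#include <stddef.h>

	/* Round x up to the next multiple of a (a power of two), like the kernel's ALIGN(). */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

	int main(void)
	{
		size_t hdr = 16;	/* assumed stand-in for sizeof(struct ncsi_pkt_hdr) */
		size_t payloads[] = { 4, 26, 32 };

		for (size_t i = 0; i < 3; i++) {
			size_t plain   = hdr + payloads[i];
			size_t aligned = hdr + ALIGN_UP(payloads[i], 4);
			printf("payload %2zu: checksum at %2zu (unaligned) vs %2zu (aligned)\n",
			       payloads[i], plain, aligned);
		}
		return 0;
	}

For payloads that are already multiples of four the two offsets agree, which is why the bug only showed up with oversized or odd-length requests.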
@@ -149,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
 	return nft_chain_validate_hooks(ctx->chain, hook_mask);
 }

+static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
+	[NFTA_FLOW_TABLE_NAME]	= { .type = NLA_STRING,
+				    .len = NFT_NAME_MAXLEN - 1 },
+};
+
 static int nft_flow_offload_init(const struct nft_ctx *ctx,
 				 const struct nft_expr *expr,
 				 const struct nlattr * const tb[])
@@ -207,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
 static struct nft_expr_type nft_flow_offload_type __read_mostly = {
 	.name		= "flow_offload",
 	.ops		= &nft_flow_offload_ops,
+	.policy		= nft_flow_offload_policy,
 	.maxattr	= NFTA_FLOW_MAX,
 	.owner		= THIS_MODULE,
 };
@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
 	nfnl_acct_put(info->nfacct);
 }

-static struct xt_match nfacct_mt_reg __read_mostly = {
-	.name       = "nfacct",
-	.family     = NFPROTO_UNSPEC,
-	.checkentry = nfacct_mt_checkentry,
-	.match      = nfacct_mt,
-	.destroy    = nfacct_mt_destroy,
-	.matchsize  = sizeof(struct xt_nfacct_match_info),
-	.usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
-	.me         = THIS_MODULE,
+static struct xt_match nfacct_mt_reg[] __read_mostly = {
+	{
+		.name       = "nfacct",
+		.revision   = 0,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = nfacct_mt_checkentry,
+		.match      = nfacct_mt,
+		.destroy    = nfacct_mt_destroy,
+		.matchsize  = sizeof(struct xt_nfacct_match_info),
+		.usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "nfacct",
+		.revision   = 1,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = nfacct_mt_checkentry,
+		.match      = nfacct_mt,
+		.destroy    = nfacct_mt_destroy,
+		.matchsize  = sizeof(struct xt_nfacct_match_info_v1),
+		.usersize   = offsetof(struct xt_nfacct_match_info_v1, nfacct),
+		.me         = THIS_MODULE,
+	},
 };

 static int __init nfacct_mt_init(void)
 {
-	return xt_register_match(&nfacct_mt_reg);
+	return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }

 static void __exit nfacct_mt_exit(void)
 {
-	xt_unregister_match(&nfacct_mt_reg);
+	xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }

 module_init(nfacct_mt_init);
@@ -67,6 +67,7 @@ struct ovs_conntrack_info {
 	struct md_mark mark;
 	struct md_labels labels;
 	char timeout[CTNL_TIMEOUT_NAME_MAX];
+	struct nf_ct_timeout *nf_ct_timeout;
 #if IS_ENABLED(CONFIG_NF_NAT)
 	struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
 #endif
@@ -697,6 +698,14 @@ static bool skb_nfct_cached(struct net *net,
 		if (help && rcu_access_pointer(help->helper) != info->helper)
 			return false;
 	}
+	if (info->nf_ct_timeout) {
+		struct nf_conn_timeout *timeout_ext;
+
+		timeout_ext = nf_ct_timeout_find(ct);
+		if (!timeout_ext || info->nf_ct_timeout !=
+		    rcu_dereference(timeout_ext->timeout))
+			return false;
+	}
 	/* Force conntrack entry direction to the current packet? */
 	if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
 		/* Delete the conntrack entry if confirmed, else just release
@@ -1565,7 +1574,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
 		case OVS_CT_ATTR_TIMEOUT:
 			memcpy(info->timeout, nla_data(a), nla_len(a));
 			if (!memchr(info->timeout, '\0', nla_len(a))) {
-				OVS_NLERR(log, "Invalid conntrack helper");
+				OVS_NLERR(log, "Invalid conntrack timeout");
 				return -EINVAL;
 			}
 			break;
@@ -1657,6 +1666,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
 					      ct_info.timeout))
 			pr_info_ratelimited("Failed to associated timeout "
 					    "policy `%s'\n", ct_info.timeout);
+		else
+			ct_info.nf_ct_timeout = rcu_dereference(
+				nf_ct_timeout_find(ct_info.ct)->timeout);
+
 	}

 	if (helper) {
net/rds/ib.c
@@ -291,7 +291,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
 				    void *buffer)
 {
 	struct rds_info_rdma_connection *iinfo = buffer;
-	struct rds_ib_connection *ic;
+	struct rds_ib_connection *ic = conn->c_transport_data;

 	/* We will only ever look at IB transports */
 	if (conn->c_trans != &rds_ib_transport)
@@ -301,15 +301,16 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,

 	iinfo->src_addr = conn->c_laddr.s6_addr32[3];
 	iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
-	iinfo->tos = conn->c_tos;
+	if (ic) {
+		iinfo->tos = conn->c_tos;
+		iinfo->sl = ic->i_sl;
+	}

 	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
 	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
 	if (rds_conn_state(conn) == RDS_CONN_UP) {
 		struct rds_ib_device *rds_ibdev;

-		ic = conn->c_transport_data;
-
 		rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
 			       (union ib_gid *)&iinfo->dst_gid);

@@ -329,7 +330,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 				     void *buffer)
 {
 	struct rds6_info_rdma_connection *iinfo6 = buffer;
-	struct rds_ib_connection *ic;
+	struct rds_ib_connection *ic = conn->c_transport_data;

 	/* We will only ever look at IB transports */
 	if (conn->c_trans != &rds_ib_transport)
@@ -337,6 +338,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,

 	iinfo6->src_addr = conn->c_laddr;
 	iinfo6->dst_addr = conn->c_faddr;
+	if (ic) {
+		iinfo6->tos = conn->c_tos;
+		iinfo6->sl = ic->i_sl;
+	}

 	memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
 	memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
@@ -344,7 +349,6 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 	if (rds_conn_state(conn) == RDS_CONN_UP) {
 		struct rds_ib_device *rds_ibdev;

-		ic = conn->c_transport_data;
 		rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
 			       (union ib_gid *)&iinfo6->dst_gid);
 		rds_ibdev = ic->rds_ibdev;
@@ -220,6 +220,7 @@ struct rds_ib_connection {
 	/* Send/Recv vectors */
 	int			i_scq_vector;
 	int			i_rcq_vector;
+	u8			i_sl;
 };

 /* This assumes that atomic_t is at least 32 bits */
@@ -152,6 +152,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event
 		  RDS_PROTOCOL_MINOR(conn->c_version),
 		  ic->i_flowctl ? ", flow control" : "");

+	/* receive sl from the peer */
+	ic->i_sl = ic->i_cm_id->route.path_rec->sl;
+
 	atomic_set(&ic->i_cq_quiesce, 0);

 	/* Init rings and fill recv. this needs to wait until protocol
@@ -43,6 +43,9 @@ static struct rdma_cm_id *rds_rdma_listen_id;
 static struct rdma_cm_id *rds6_rdma_listen_id;
 #endif

+/* Per IB specification 7.7.3, service level is a 4-bit field. */
+#define TOS_TO_SL(tos)		((tos) & 0xF)
+
 static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
 					 struct rdma_cm_event *event,
 					 bool isv6)
@@ -97,10 +100,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
 			struct rds_ib_connection *ibic;

 			ibic = conn->c_transport_data;
-			if (ibic && ibic->i_cm_id == cm_id)
+			if (ibic && ibic->i_cm_id == cm_id) {
+				cm_id->route.path_rec[0].sl =
+					TOS_TO_SL(conn->c_tos);
 				ret = trans->cm_initiate_connect(cm_id, isv6);
-			else
+			} else {
 				rds_conn_drop(conn);
+			}
 		}
 		break;
@@ -76,13 +76,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	struct smc_connection *conn = &smc->conn;
 	struct sock *sk = &smc->sk;
-	bool noblock;
 	long timeo;
 	int rc = 0;

 	/* similar to sk_stream_wait_memory */
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-	noblock = timeo ? false : true;
 	add_wait_queue(sk_sleep(sk), &wait);
 	while (1) {
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -97,8 +95,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
 			break;
 		}
 		if (!timeo) {
-			if (noblock)
-				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+			/* ensure EPOLLOUT is subsequently generated */
+			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 			rc = -EAGAIN;
 			break;
 		}
@@ -2788,7 +2788,7 @@ static void reg_process_pending_hints(void)

 	/* When last_request->processed becomes true this will be rescheduled */
 	if (lr && !lr->processed) {
-		reg_process_hint(lr);
+		pr_debug("Pending regulatory request, waiting for it to be processed...\n");
 		return;
 	}
@@ -233,25 +233,30 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,

 	switch (params->cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
+		/* Extended Key ID can only be used with CCMP/GCMP ciphers */
+		if ((pairwise && key_idx) ||
+		    params->mode != NL80211_KEY_RX_TX)
+			return -EINVAL;
+		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_CCMP_256:
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
-		/* IEEE802.11-2016 allows only 0 and - when using Extended Key
-		 * ID - 1 as index for pairwise keys.
+		/* IEEE802.11-2016 allows only 0 and - when supporting
+		 * Extended Key ID - 1 as index for pairwise keys.
 		 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when
 		 * the driver supports Extended Key ID.
 		 * @NL80211_KEY_SET_TX can't be set when installing and
 		 * validating a key.
 		 */
-		if (params->mode == NL80211_KEY_NO_TX) {
-			if (!wiphy_ext_feature_isset(&rdev->wiphy,
-						     NL80211_EXT_FEATURE_EXT_KEY_ID))
+		if ((params->mode == NL80211_KEY_NO_TX && !pairwise) ||
+		    params->mode == NL80211_KEY_SET_TX)
+			return -EINVAL;
+		if (wiphy_ext_feature_isset(&rdev->wiphy,
+					    NL80211_EXT_FEATURE_EXT_KEY_ID)) {
+			if (pairwise && (key_idx < 0 || key_idx > 1))
 				return -EINVAL;
-			else if (!pairwise || key_idx < 0 || key_idx > 1)
-				return -EINVAL;
-		} else if ((pairwise && key_idx) ||
-			   params->mode == NL80211_KEY_SET_TX) {
+		} else if (pairwise && key_idx) {
 			return -EINVAL;
 		}
 		break;
@@ -365,7 +365,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
 	if (!umem->pages) {
 		err = -ENOMEM;
-		goto out_account;
+		goto out_pin;
 	}

 	for (i = 0; i < umem->npgs; i++)
@@ -373,6 +373,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)

 	return 0;

+out_pin:
+	xdp_umem_unpin_pages(umem);
 out_account:
 	xdp_umem_unaccount_pages(umem);
 	return err;
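The new out_pin label restores the usual kernel error-unwinding pattern: resources are released in reverse order of acquisition, and each failure jumps to the label that undoes everything acquired so far, so the pinned umem pages are no longer leaked when the later allocation fails. A generic, standalone sketch of the pattern (names are illustrative only, not the xdp_umem API):

	#include <stdlib.h>

	/* Illustrative stand-ins for "account memory", "pin pages", "allocate table". */
	static int do_setup(void)
	{
		void *accounting, *pinned = NULL, *table = NULL;
		int err = -1;

		accounting = malloc(32);		/* step 1 */
		if (!accounting)
			return -1;

		pinned = malloc(64);			/* step 2 */
		if (!pinned)
			goto out_account;		/* undo step 1 only */

		table = malloc(128);			/* step 3 */
		if (!table)
			goto out_pin;			/* undo steps 2 and 1 */

		free(table); free(pinned); free(accounting);	/* success path for the demo */
		return 0;

	out_pin:
		free(pinned);
	out_account:
		free(accounting);
		return err;
	}

	int main(void) { return do_setup() ? 1 : 0; }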
@@ -3269,7 +3269,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 	struct flowi4 *fl4 = &fl->u.ip4;
 	int oif = 0;

-	if (skb_dst(skb))
+	if (skb_dst(skb) && skb_dst(skb)->dev)
 		oif = skb_dst(skb)->dev->ifindex;

 	memset(fl4, 0, sizeof(struct flowi4));
@@ -3387,7 +3387,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)

 	nexthdr = nh[nhoff];

-	if (skb_dst(skb))
+	if (skb_dst(skb) && skb_dst(skb)->dev)
 		oif = skb_dst(skb)->dev->ifindex;

 	memset(fl6, 0, sizeof(struct flowi6));
@@ -363,7 +363,9 @@ static int do_show(int argc, char **argv)
 		if (fd < 0)
 			return -1;

-		return show_prog(fd);
+		err = show_prog(fd);
+		close(fd);
+		return err;
 	}

 	if (argc)
@@ -34,6 +34,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
 BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
 TEST_GEN_FILES = $(BPF_OBJ_FILES)

+BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
+TEST_FILES = $(BTF_C_FILES)
+
 # Also test sub-register code-gen if LLVM has eBPF v3 processor support which
 # contains both ALU32 and JMP32 instructions.
 SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
@@ -68,7 +71,8 @@ TEST_PROGS := test_kmod.sh \
 TEST_PROGS_EXTENDED := with_addr.sh \
 	with_tunnels.sh \
 	tcp_client.py \
-	tcp_server.py
+	tcp_server.py \
+	test_xdp_vlan.sh

 # Compile but not part of 'make run_tests'
 TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_IPV6_SIT=m
+CONFIG_BPF_JIT=y
@@ -97,6 +97,13 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
 	}

 	snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
+	if (access(test_file, R_OK) == -1)
+		/*
+		 * When the test is run with O=, kselftest copies TEST_FILES
+		 * without preserving the directory structure.
+		 */
+		snprintf(test_file, sizeof(test_file), "%s.c",
+			test_case->name);
 	/*
 	 * Diff test output and expected test output, contained between
 	 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
@@ -20,9 +20,9 @@ int main(int argc, char **argv)
 		BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 			     BPF_FUNC_get_local_storage),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
 		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
-		BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),

 		BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
 		BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
@@ -30,7 +30,7 @@ int main(int argc, char **argv)
 			     BPF_FUNC_get_local_storage),
 		BPF_MOV64_IMM(BPF_REG_1, 1),
 		BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
-		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
 		BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
 		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
 		BPF_EXIT_INSN(),
@@ -13,6 +13,7 @@
 #include <bpf/bpf.h>

 #include "cgroup_helpers.h"
+#include "bpf_endian.h"
 #include "bpf_rlimit.h"
 #include "bpf_util.h"

@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
 		/* if (ip == expected && port == expected) */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock, src_ip6[3])),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+			    __bpf_constant_ntohl(0x00000001), 4),
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock, src_port)),
 		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
 		/* if (ip == expected && port == expected) */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock, src_ip4)),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+			    __bpf_constant_ntohl(0x7F000001), 4),
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock, src_port)),
 		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
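The test_sock changes above replace hand-reversed hex literals with __bpf_constant_ntohl() because the BPF_W loads of src_ip4/src_ip6 return network-byte-order words, and the old constants were only correct on little-endian hosts. A tiny host-side check (standalone and purely illustrative) of why the conversion belongs in the source rather than being baked into the literal:

	#include <arpa/inet.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int loopback = htonl(0x7F000001);	/* 127.0.0.1 in network byte order */

		/* On little-endian hosts this prints 0x0100007f, matching the old literal;
		 * on big-endian hosts it prints 0x7f000001, which the old literal missed. */
		printf("network-order word: 0x%08x\n", loopback);
		return 0;
	}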