Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
commit 6a92ef08a1
@@ -5930,7 +5930,7 @@ F: Documentation/dev-tools/gcov.rst

 GDB KERNEL DEBUGGING HELPER SCRIPTS
 M: Jan Kiszka <jan.kiszka@siemens.com>
-M: Kieran Bingham <kieran@bingham.xyz>
+M: Kieran Bingham <kbingham@kernel.org>
 S: Supported
 F: scripts/gdb/

@@ -11,7 +11,6 @@ config PARISC
     select ARCH_HAS_ELF_RANDOMIZE
     select ARCH_HAS_STRICT_KERNEL_RWX
     select ARCH_HAS_UBSAN_SANITIZE_ALL
-    select ARCH_WANTS_UBSAN_NO_NULL
     select ARCH_SUPPORTS_MEMORY_FAILURE
     select RTC_CLASS
     select RTC_DRV_GENERIC
@@ -106,7 +106,6 @@ config S390
     select ARCH_USE_BUILTIN_BSWAP
     select ARCH_USE_CMPXCHG_LOCKREF
     select ARCH_WANTS_DYNAMIC_TASK_STRUCT
-    select ARCH_WANTS_UBSAN_NO_NULL
     select ARCH_WANT_IPC_PARSE_VERSION
     select BUILDTIME_EXTABLE_SORT
     select CLONE_BACKWARDS2
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
     zram->backing_dev = NULL;
     zram->old_block_size = 0;
     zram->bdev = NULL;
-
+    zram->disk->queue->backing_dev_info->capabilities |=
+            BDI_CAP_SYNCHRONOUS_IO;
     kvfree(zram->bitmap);
     zram->bitmap = NULL;
 }
@@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev,
     zram->backing_dev = backing_dev;
     zram->bitmap = bitmap;
     zram->nr_pages = nr_pages;
+    /*
+     * With writeback feature, zram does asynchronous IO so it's no longer
+     * synchronous device so let's remove synchronous io flag. Othewise,
+     * upper layer(e.g., swap) could wait IO completion rather than
+     * (submit and return), which will cause system sluggish.
+     * Furthermore, when the IO function returns(e.g., swap_readpage),
+     * upper layer expects IO was done so it could deallocate the page
+     * freely but in fact, IO is going on so finally could cause
+     * use-after-free when the IO is really done.
+     */
+    zram->disk->queue->backing_dev_info->capabilities &=
+            ~BDI_CAP_SYNCHRONOUS_IO;
     up_write(&zram->init_lock);

     pr_info("setup backing device %s\n", file_name);

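The comment added above carries the reasoning behind both zram hunks: once writeback is enabled, completion is asynchronous, so an upper layer that frees the page as soon as the submit call returns can hit a use-after-free. Below is a minimal user-space sketch of that ordering rule, not kernel code; struct io_request, backend_thread() and the 4096-byte buffer are illustrative stand-ins for the page and the backing device.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    struct io_request {
        char *buf;                      /* caller-owned buffer, like a page */
        pthread_mutex_t lock;
        pthread_cond_t done_cond;
        bool done;
    };

    /* Simulated asynchronous backend: finishes the IO "later". */
    static void *backend_thread(void *arg)
    {
        struct io_request *req = arg;

        usleep(10000);                  /* IO still in flight here */
        memset(req->buf, 0xa5, 4096);   /* the device touches the buffer */

        pthread_mutex_lock(&req->lock);
        req->done = true;
        pthread_cond_signal(&req->done_cond);
        pthread_mutex_unlock(&req->lock);
        return NULL;
    }

    int main(void)
    {
        struct io_request req = { .buf = malloc(4096), .done = false };
        pthread_t tid;

        pthread_mutex_init(&req.lock, NULL);
        pthread_cond_init(&req.done_cond, NULL);
        pthread_create(&tid, NULL, backend_thread, &req);

        /*
         * The device is asynchronous, so the submitter must wait for the
         * completion before freeing the buffer. Freeing right after submit,
         * as a caller of a truly synchronous device may do, would be the
         * use-after-free the zram comment warns about.
         */
        pthread_mutex_lock(&req.lock);
        while (!req.done)
            pthread_cond_wait(&req.done_cond, &req.lock);
        pthread_mutex_unlock(&req.lock);

        free(req.buf);
        pthread_join(tid, NULL);
        puts("IO completed before the buffer was freed");
        return 0;
    }

Built with something like cc -pthread sketch.c; the only point is that the free must be ordered after the completion once the device stops advertising synchronous IO.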
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
    if (priv->len_recv) {
        /* read length byte */
        rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);

        /*
         * We expect at least 2 interrupts for I2C_M_RECV_LEN
         * transactions. The length is updated during the first
         * interrupt, and the buffer contents are only copied
         * during subsequent interrupts. If in case the interrupts
         * get merged we would complete the transaction without
         * copying out the bytes from RX fifo. To avoid this now we
         * drain the fifo as and when data is available.
         * We drained the rlen byte already, decrement total length
         * by one.
         */

        len--;
        if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
            rlen = 0; /*abort transfer */
            priv->msg_buf_remaining = 0;
            priv->msg_len = 0;
        } else {
            xlp9xx_i2c_update_rlen(priv);
            return;
        }

        *buf++ = rlen;
        if (priv->client_pec)
            ++rlen; /* account for error check byte */
        /* update remaining bytes and message length */
        priv->msg_buf_remaining = rlen;
        priv->msg_len = rlen + 1;
        }
        xlp9xx_i2c_update_rlen(priv);
        priv->len_recv = false;
    } else {
    }

    len = min(priv->msg_buf_remaining, len);
    for (i = 0; i < len; i++, buf++)
        *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);

    priv->msg_buf_remaining -= len;
    }

    priv->msg_buf = buf;

    if (priv->msg_buf_remaining)

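For context on the hunk above: in an I2C_M_RECV_LEN (SMBus block read) transfer, the first byte drained from the RX FIFO is the block length, and it must be validated before the remaining bytes are copied out. A small user-space sketch of that validation follows; it is an illustration, not the driver itself: fifo_pop() and drain_block_read() are hypothetical helpers, and only I2C_SMBUS_BLOCK_MAX mirrors the real SMBus limit of 32 bytes.

    #include <stdint.h>
    #include <stdio.h>

    #define I2C_SMBUS_BLOCK_MAX 32   /* SMBus caps block transfers at 32 bytes */

    /* Hypothetical helper: pop one byte from a simulated RX FIFO. */
    static uint8_t fifo_pop(const uint8_t **fifo, unsigned int *avail)
    {
        (*avail)--;
        return *(*fifo)++;
    }

    /* Returns number of payload bytes copied into buf, or 0 to abort. */
    static unsigned int drain_block_read(const uint8_t *fifo, unsigned int avail,
                                         uint8_t *buf)
    {
        unsigned int copied = 0;
        uint8_t rlen;

        if (!avail)
            return 0;

        rlen = fifo_pop(&fifo, &avail);        /* length byte drained first */
        if (rlen == 0 || rlen > I2C_SMBUS_BLOCK_MAX)
            return 0;                          /* abort the transfer */

        *buf++ = rlen;                         /* report the length to the caller */

        /* Copy whatever is already available; later interrupts drain the rest. */
        while (avail && copied < rlen) {
            *buf++ = fifo_pop(&fifo, &avail);
            copied++;
        }
        return copied;
    }

    int main(void)
    {
        const uint8_t fifo[] = { 4, 0xde, 0xad, 0xbe, 0xef };
        uint8_t out[1 + I2C_SMBUS_BLOCK_MAX];
        unsigned int n = drain_block_read(fifo, sizeof(fifo), out);

        printf("copied %u of %u payload bytes\n", n, out[0]);
        return 0;
    }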
@@ -2358,14 +2358,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
         int i;

         for (i = 0; i < cpsw->data.slaves; i++) {
-            if (vid == cpsw->slaves[i].port_vlan)
-                return -EINVAL;
+            if (vid == cpsw->slaves[i].port_vlan) {
+                ret = -EINVAL;
+                goto err;
+            }
         }
     }

     dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
     ret = cpsw_add_vlan_ale_entry(priv, vid);
-
+err:
     pm_runtime_put(cpsw->dev);
     return ret;
 }
@@ -2391,22 +2393,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,

         for (i = 0; i < cpsw->data.slaves; i++) {
             if (vid == cpsw->slaves[i].port_vlan)
-                return -EINVAL;
+                goto err;
         }
     }

     dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
     ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
-    if (ret != 0)
-        return ret;
-
-    ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+    ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
                              HOST_PORT_NUM, ALE_VLAN, vid);
-    if (ret != 0)
-        return ret;
-
-    ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+    ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
                              0, ALE_VLAN, vid);
+err:
     pm_runtime_put(cpsw->dev);
     return ret;
 }
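Both cpsw hunks above converge on the same error-handling shape: the function takes a runtime-PM reference near the top, so every failure path must fall through a single label that drops it, instead of returning early and leaking the reference. A generic sketch of that pattern follows; resource_get()/resource_put(), is_reserved() and add_entry() are made-up stand-ins for pm_runtime_get_sync()/pm_runtime_put() and the ALE calls.

    #include <stdio.h>

    static int refcount;

    static int resource_get(void) { refcount++; return 0; }
    static void resource_put(void) { refcount--; }

    static int is_reserved(int vid) { return vid == 100; }
    static int add_entry(int vid)   { return vid > 0 ? 0 : -1; }

    static int add_vid(int vid)
    {
        int ret;

        ret = resource_get();
        if (ret < 0)
            return ret;

        if (is_reserved(vid)) {
            ret = -1;             /* an early return here would leak the reference */
            goto err;
        }

        ret = add_entry(vid);
    err:
        resource_put();           /* every path after resource_get() ends here */
        return ret;
    }

    int main(void)
    {
        printf("ret=%d refcount=%d\n", add_vid(100), refcount);
        printf("ret=%d refcount=%d\n", add_vid(5), refcount);
        return 0;
    }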
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,

     idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
     if (idx < 0)
-        return -EINVAL;
+        return -ENOENT;

     cpsw_ale_read(ale, idx, ale_entry);

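The single-line change above is about error semantics: a delete that finds nothing now reports -ENOENT rather than -EINVAL, so callers (such as the VLAN teardown path above, which ORs the results together) can treat a missing entry differently from a malformed request. A tiny illustrative sketch, not the ALE code:

    #include <errno.h>
    #include <stdio.h>

    static int table[4] = { 10, 20, 30, 40 };

    static int table_del(int val)
    {
        int i;

        if (val < 0)
            return -EINVAL;            /* malformed request */

        for (i = 0; i < 4; i++) {
            if (table[i] == val) {
                table[i] = 0;
                return 0;
            }
        }
        return -ENOENT;                /* nothing to delete: often ignorable */
    }

    int main(void)
    {
        int ret = table_del(99);

        if (ret && ret != -ENOENT)     /* teardown path: a missing entry is fine */
            fprintf(stderr, "delete failed: %d\n", ret);
        return 0;
    }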
@@ -895,7 +895,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                                   struct sk_buff *skb,
                                   struct sk_buff_head *list)
 {
-    struct skb_shared_info *shinfo = skb_shinfo(skb);
     RING_IDX cons = queue->rx.rsp_cons;
     struct sk_buff *nskb;

@@ -904,15 +903,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
         RING_GET_RESPONSE(&queue->rx, ++cons);
         skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

-        if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+        if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
             unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

             BUG_ON(pull_to <= skb_headlen(skb));
             __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
         }
-        BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+        BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

-        skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                        skb_frag_page(nfrag),
                         rx->offset, rx->status, PAGE_SIZE);

         skb_shinfo(nskb)->nr_frags = 0;

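The two xen-netfront hunks drop the cached shinfo pointer and re-evaluate skb_shinfo(skb) at each use because __pskb_pull_tail() may reallocate the skb's data area, which is where the shared info lives, leaving the cached pointer stale. The user-space sketch below shows the same bug class with an ordinary buffer; struct buf, grow_buf() and tail_of() are invented for the illustration.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
        char *data;
        size_t len;
    };

    /* May move buf->data, invalidating any pointer previously derived from it. */
    static void grow_buf(struct buf *b, size_t extra)
    {
        char *p = realloc(b->data, b->len + extra);

        if (!p)
            exit(1);
        memset(p + b->len, 0, extra);
        b->data = p;
        b->len += extra;
    }

    static char *tail_of(struct buf *b)
    {
        return b->data + b->len - 1;
    }

    int main(void)
    {
        struct buf b = { .data = calloc(1, 16), .len = 16 };
        char *cached_tail = tail_of(&b);   /* like caching skb_shinfo(skb) */

        grow_buf(&b, 4096);                /* like __pskb_pull_tail(): may move data */

        (void)cached_tail;                 /* stale now; never dereference it */
        *tail_of(&b) = 1;                  /* re-derive the pointer after the call */

        printf("last byte = %d\n", b.data[b.len - 1]);
        free(b.data);
        return 0;
    }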
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };

 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                             struct xdp_bulk_queue *bq);
+                             struct xdp_bulk_queue *bq, bool in_napi_ctx);

 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
         struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);

         /* No concurrent bq_enqueue can run at this point */
-        bq_flush_to_queue(rcpu, bq);
+        bq_flush_to_queue(rcpu, bq, false);
     }
     free_percpu(rcpu->bulkq);
     /* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };

 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                             struct xdp_bulk_queue *bq)
+                             struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
     unsigned int processed = 0, drops = 0;
     const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
         err = __ptr_ring_produce(q, xdpf);
         if (err) {
             drops++;
-            xdp_return_frame_rx_napi(xdpf);
+            if (likely(in_napi_ctx))
+                xdp_return_frame_rx_napi(xdpf);
+            else
+                xdp_return_frame(xdpf);
         }
         processed++;
     }
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
     struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

     if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-        bq_flush_to_queue(rcpu, bq);
+        bq_flush_to_queue(rcpu, bq, true);

     /* Notice, xdp_buff/page MUST be queued here, long enough for
      * driver to code invoking us to finished, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)

         /* Flush all frames in bulkq to real queue */
         bq = this_cpu_ptr(rcpu->bulkq);
-        bq_flush_to_queue(rcpu, bq);
+        bq_flush_to_queue(rcpu, bq, true);

         /* If already running, costs spin_lock_irqsave + smb_mb */
         wake_up_process(rcpu->kthread);

@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }

 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-                       struct xdp_bulk_queue *bq, u32 flags)
+                       struct xdp_bulk_queue *bq, u32 flags,
+                       bool in_napi_ctx)
 {
     struct net_device *dev = obj->dev;
     int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
             struct xdp_frame *xdpf = bq->q[i];

             /* RX path under NAPI protection, can return frames faster */
-            xdp_return_frame_rx_napi(xdpf);
+            if (likely(in_napi_ctx))
+                xdp_return_frame_rx_napi(xdpf);
+            else
+                xdp_return_frame(xdpf);
             drops++;
         }
         goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
         __clear_bit(bit, bitmap);

         bq = this_cpu_ptr(dev->bulkq);
-        bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+        bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
     }
 }

@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
     struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

     if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-        bq_xmit_all(obj, bq, 0);
+        bq_xmit_all(obj, bq, 0, true);

     /* Ingress dev_rx will be the same for all xdp_frame's in
      * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
             __clear_bit(dev->bit, bitmap);

             bq = per_cpu_ptr(dev->bulkq, cpu);
-            bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+            bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
         }
     }
 }

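The cpumap and devmap hunks all thread a new in_napi_ctx flag from the call sites down to the flush routines, because those routines are now reached both from the NAPI receive path (where the faster xdp_return_frame_rx_napi() is safe) and from teardown paths (where only xdp_return_frame() is). A generic sketch of the idea, with fast_free()/slow_free() as placeholders for the two frame-return helpers:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void fast_free(void *p) { free(p); }   /* only valid in "NAPI" context */
    static void slow_free(void *p) { free(p); }   /* always valid */

    struct bulk_queue {
        void *q[16];
        unsigned int count;
    };

    static void bq_flush(struct bulk_queue *bq, bool in_napi_ctx)
    {
        unsigned int i;

        for (i = 0; i < bq->count; i++) {
            if (in_napi_ctx)
                fast_free(bq->q[i]);
            else
                slow_free(bq->q[i]);   /* e.g. map teardown, no NAPI protection */
        }
        bq->count = 0;
    }

    int main(void)
    {
        struct bulk_queue bq = { .count = 2, .q = { malloc(8), malloc(8) } };

        bq_flush(&bq, false);          /* teardown-style call site */
        printf("flushed, count=%u\n", bq.count);
        return 0;
    }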
@@ -1045,12 +1045,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
     timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

     while (msg_data_left(msg)) {
-        struct sk_msg_buff *m;
+        struct sk_msg_buff *m = NULL;
         bool enospc = false;
         int copy;

         if (sk->sk_err) {
-            err = sk->sk_err;
+            err = -sk->sk_err;
             goto out_err;
         }

@@ -1113,9 +1113,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
         set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
         err = sk_stream_wait_memory(sk, &timeo);
-        if (err)
+        if (err) {
+            if (m && m != psock->cork)
+                free_start_sg(sk, m);
             goto out_err;
+        }
     }
 out_err:
     if (err < 0)
         err = sk_stream_error(sk, msg->msg_flags, err);
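One half of the bpf_tcp_sendmsg() fix above is a sign convention: sk->sk_err holds a positive errno value, while the sendmsg path must report a negative one, hence err = -sk->sk_err (the other half frees the partially built message on the wait-for-memory error path). A tiny sketch of that convention, with fake_sock standing in for the real socket:

    #include <errno.h>
    #include <stdio.h>

    struct fake_sock {
        int sk_err;               /* positive errno, e.g. EPIPE */
    };

    static int do_send(struct fake_sock *sk)
    {
        if (sk->sk_err)
            return -sk->sk_err;   /* returning the raw positive value is the bug */
        return 0;
    }

    int main(void)
    {
        struct fake_sock sk = { .sk_err = EPIPE };

        printf("do_send() = %d (expected %d)\n", do_send(&sk), -EPIPE);
        return 0;
    }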
@@ -1,9 +1,6 @@
 config ARCH_HAS_UBSAN_SANITIZE_ALL
     bool

-config ARCH_WANTS_UBSAN_NO_NULL
-    def_bool n
-
 config UBSAN
     bool "Undefined behaviour sanity checker"
     help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
       Enabling this option on architectures that support unaligned
       accesses may produce a lot of false positives.

-config UBSAN_NULL
-    bool "Enable checking of null pointers"
-    depends on UBSAN
-    default y if !ARCH_WANTS_UBSAN_NO_NULL
-    help
-      This option enables detection of memory accesses via a
-      null pointer.
-
 config TEST_UBSAN
     tristate "Module for testing for undefined behavior detection"
     depends on m && UBSAN
@@ -4395,6 +4395,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
         return -EINVAL;

     maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+    if (!maddr)
+        return -ENOMEM;
+
     if (write)
         memcpy_toio(maddr + offset, buf, len);
     else
@@ -14,7 +14,7 @@
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"

-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */

 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>

-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */

 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
      * procedure.
      */
-    create_cpu_entry(1, 1024, 0, false);
+    create_cpu_entry(1, 128, 0, false);
     create_cpu_entry(1, 8, 0, false);
     create_cpu_entry(1, 16000, 0, false);
 }

@@ -14,10 +14,6 @@ ifdef CONFIG_UBSAN_ALIGNMENT
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
 endif

-ifdef CONFIG_UBSAN_NULL
-      CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
-endif
-
 # -fsanitize=* options makes GCC less smart than usual and
 # increase number of 'maybe-uninitialized false-positives
 CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */

 #include <stdlib.h>
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */

 #ifndef __BPF_BTF_H
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
     while (s->bytes_recvd < total_bytes) {
         if (txmsg_cork) {
             timeout.tv_sec = 0;
-            timeout.tv_usec = 1000;
+            timeout.tv_usec = 300000;
         } else {
             timeout.tv_sec = 1;
             timeout.tv_usec = 0;