commit f8e6dfc64f

Merge tag 'net-5.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Networking fixes, including fixes from netfilter, bpf, can and
  ieee802154.

  The size of this is pretty normal, but we got more fixes for 5.14
  changes this week than last week. Nothing major but the trend is the
  opposite of what we like. We'll see how the next week goes..

  Current release - regressions:

   - r8169: fix ASPM-related link-up regressions

   - bridge: fix flags interpretation for extern learn fdb entries

   - phy: micrel: fix link detection on ksz87xx switch

   - Revert "tipc: Return the correct errno code"

   - ptp: fix possible memory leak caused by invalid cast

  Current release - new code bugs:

   - bpf: add missing bpf_read_[un]lock_trace() for syscall program

   - bpf: fix potentially incorrect results with bpf_get_local_storage()

   - page_pool: mask the page->signature before the checking, avoid dma
     mapping leaks

   - netfilter: nfnetlink_hook: 5 fixes to information in netlink dumps

   - bnxt_en: fix firmware interface issues with PTP

   - mlx5: Bridge, fix ageing time

  Previous releases - regressions:

   - linkwatch: fix failure to restore device state across
     suspend/resume

   - bareudp: fix invalid read beyond skb's linear data

  Previous releases - always broken:

   - bpf: fix integer overflow involving bucket_size

   - ppp: fix issues when desired interface name is specified via
     netlink

   - wwan: mhi_wwan_ctrl: fix possible deadlock

   - dsa: microchip: ksz8795: fix number of VLAN related bugs

   - dsa: drivers: fix broken backpressure in .port_fdb_dump

   - dsa: qca: ar9331: make proper initial port defaults

  Misc:

   - bpf: add lockdown check for probe_write_user helper

   - netfilter: conntrack: remove offload_pickup sysctl before 5.14 is
     out

   - netfilter: conntrack: collect all entries in one cycle,
     heuristically slow down garbage collection scans on idle systems
     to prevent frequent wake ups"

* tag 'net-5.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (87 commits)
  vsock/virtio: avoid potential deadlock when vsock device remove
  wwan: core: Avoid returning NULL from wwan_create_dev()
  net: dsa: sja1105: unregister the MDIO buses during teardown
  Revert "tipc: Return the correct errno code"
  net: mscc: Fix non-GPL export of regmap APIs
  net: igmp: increase size of mr_ifc_count
  MAINTAINERS: switch to my OMP email for Renesas Ethernet drivers
  tcp_bbr: fix u32 wrap bug in round logic if bbr_init() called after 2B packets
  net: pcs: xpcs: fix error handling on failed to allocate memory
  net: linkwatch: fix failure to restore device state across suspend/resume
  net: bridge: fix memleak in br_add_if()
  net: switchdev: zero-initialize struct switchdev_notifier_fdb_info emitted by drivers towards the bridge
  net: bridge: fix flags interpretation for extern learn fdb entries
  net: dsa: sja1105: fix broken backpressure in .port_fdb_dump
  net: dsa: lantiq: fix broken backpressure in .port_fdb_dump
  net: dsa: lan9303: fix broken backpressure in .port_fdb_dump
  net: dsa: hellcreek: fix broken backpressure in .port_fdb_dump
  bpf, core: Fix kernel-doc notation
  net: igmp: fix data-race in igmp_ifc_timer_expire()
  net: Fix memory leak in ieee802154_raw_deliver
  ...
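Several commits in this pull ("fix broken backpressure in .port_fdb_dump" for sja1105, lantiq, lan9303 and hellcreek) repair the same pattern, visible in the hunks below: a DSA driver's FDB dump must propagate the dsa_fdb_dump_cb_t return value so the walk stops once the netlink dump buffer is full, instead of silently discarding entries. A minimal sketch of the fixed shape — the foo_* names and the foo_fdb_next() iterator are hypothetical placeholders, not a real driver API:

	static int foo_port_fdb_dump(struct dsa_switch *ds, int port,
				     dsa_fdb_dump_cb_t *cb, void *data)
	{
		struct foo_fdb_entry entry;	/* hypothetical entry type */
		int err = 0;

		/* Walk the hardware FDB; foo_fdb_next() is a stand-in for
		 * whatever per-driver iterator reads the next table entry.
		 */
		while (foo_fdb_next(ds->priv, port, &entry)) {
			err = cb(entry.mac, entry.vid, entry.is_static, data);
			if (err)
				break;	/* netlink buffer full: stop, don't drop */
		}

		return err;	/* before the fixes, drivers returned 0 here */
	}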
@@ -108,7 +108,7 @@ This bump in ABI version is at most once per kernel development cycle.
 
 For example, if current state of ``libbpf.map`` is:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
         	global:
@@ -121,7 +121,7 @@ For example, if current state of ``libbpf.map`` is:
 , and a new symbol ``bpf_func_c`` is being introduced, then
 ``libbpf.map`` should be changed like this:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
         	global:
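Both ``libbpf.map`` examples in the hunks above are truncated after ``global:`` in this capture. For reference, the complete after-state the documentation describes should look roughly like the following — a reconstruction from the surrounding text; ``bpf_func_a``/``bpf_func_b`` follow the page's naming convention and the exact entries are illustrative:

        LIBBPF_0.0.1 {
                global:
                        bpf_func_a;
                        bpf_func_b;
                local:
                        *;
        };

        LIBBPF_0.0.2 {
                global:
                        bpf_func_c;
        } LIBBPF_0.0.1;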
@@ -191,19 +191,9 @@ nf_flowtable_tcp_timeout - INTEGER (seconds)
 	TCP connections may be offloaded from nf conntrack to nf flow table.
-	Once aged, the connection is returned to nf conntrack with tcp pickup timeout.
-
-nf_flowtable_tcp_pickup - INTEGER (seconds)
-	default 120
-
-	TCP connection timeout after being aged from nf flow table offload.
+	Once aged, the connection is returned to nf conntrack.
 
 nf_flowtable_udp_timeout - INTEGER (seconds)
 	default 30
 
 	Control offload timeout for udp connections.
 	UDP connections may be offloaded from nf conntrack to nf flow table.
-	Once aged, the connection is returned to nf conntrack with udp pickup timeout.
-
-nf_flowtable_udp_pickup - INTEGER (seconds)
-	default 30
-
-	UDP connection timeout after being aged from nf flow table offload.
+	Once aged, the connection is returned to nf conntrack.
@@ -11327,7 +11327,7 @@ W:	https://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
 F:	drivers/media/radio/radio-maxiradio*
 
-MCAB MICROCHIP CAN BUS ANALYZER TOOL DRIVER
+MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER
 R:	Yasushi SHOJI <yashi@spacecubics.com>
 L:	linux-can@vger.kernel.org
 S:	Maintained
@@ -15803,7 +15803,7 @@ F:	Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
 F:	drivers/i2c/busses/i2c-emev2.c
 
 RENESAS ETHERNET DRIVERS
-R:	Sergei Shtylyov <sergei.shtylyov@gmail.com>
+R:	Sergey Shtylyov <s.shtylyov@omp.ru>
 L:	netdev@vger.kernel.org
 L:	linux-renesas-soc@vger.kernel.org
 F:	Documentation/devicetree/bindings/net/renesas,*.yaml
@@ -945,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	u32 *cqb = NULL;
 	void *cqc;
 	int cqe_size;
-	unsigned int irqn;
 	int eqn;
 	int err;
 
@@ -984,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}
 
-	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
 	if (err)
 		goto err_cqb;
 
@@ -1007,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		goto err_cqb;
 
 	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
-	cq->mcq.irqn = irqn;
 	if (udata)
 		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
 	else
@@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 	struct mlx5_ib_dev *dev;
 	int user_vector;
 	int dev_eqn;
-	unsigned int irqn;
 	int err;
 
 	if (uverbs_copy_from(&user_vector, attrs,
@@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 		return PTR_ERR(c);
 	dev = to_mdev(c->ibucontext.device);
 
-	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
 	if (err < 0)
 		return err;
 
@@ -71,12 +71,18 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 		family = AF_INET6;
 
 	if (bareudp->ethertype == htons(ETH_P_IP)) {
-		struct iphdr *iphdr;
+		__u8 ipversion;
 
-		iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
-		if (iphdr->version == 4) {
-			proto = bareudp->ethertype;
-		} else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
+		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+				  sizeof(ipversion))) {
+			bareudp->dev->stats.rx_dropped++;
+			goto drop;
+		}
+		ipversion >>= 4;
+
+		if (ipversion == 4) {
+			proto = htons(ETH_P_IP);
+		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
 			proto = htons(ETH_P_IPV6);
 		} else {
 			bareudp->dev->stats.rx_dropped++;
@@ -1164,10 +1164,10 @@ static int m_can_set_bittiming(struct net_device *dev)
 					  FIELD_PREP(TDCR_TDCO_MASK, tdco));
 		}
 
-		reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
-			  FIELD_PREP(NBTP_NSJW_MASK, sjw) |
-			  FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
-			  FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
+		reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
+			   FIELD_PREP(DBTP_DSJW_MASK, sjw) |
+			   FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
+			   FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
 
 		m_can_write(cdev, M_CAN_DBTP, reg_btp);
 	}
@@ -912,6 +912,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
 {
 	struct hellcreek *hellcreek = ds->priv;
 	u16 entries;
+	int ret = 0;
 	size_t i;
 
 	mutex_lock(&hellcreek->reg_lock);
@@ -943,12 +944,14 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
 		if (!(entry.portmask & BIT(port)))
 			continue;
 
-		cb(entry.mac, 0, entry.is_static, data);
+		ret = cb(entry.mac, 0, entry.is_static, data);
+		if (ret)
+			break;
 	}
 
 	mutex_unlock(&hellcreek->reg_lock);
 
-	return 0;
+	return ret;
 }
 
 static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
@@ -557,12 +557,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
 	return 0;
 }
 
-typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
-			   int portmap, void *ctx);
+typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
+			  int portmap, void *ctx);
 
-static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
+static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 {
-	int i;
+	int ret = 0, i;
 
 	mutex_lock(&chip->alr_mutex);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
@@ -582,13 +582,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 					LAN9303_ALR_DAT1_PORT_BITOFFS;
 			portmap = alrport_2_portmap[alrport];
 
-			cb(chip, dat0, dat1, portmap, ctx);
+			ret = cb(chip, dat0, dat1, portmap, ctx);
+			if (ret)
+				break;
 
 		lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
 					 LAN9303_ALR_CMD_GET_NEXT);
 		lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
 	}
 	mutex_unlock(&chip->alr_mutex);
+
+	return ret;
 }
 
 static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -606,18 +610,20 @@ struct del_port_learned_ctx {
 };
 
 /* Clear learned (non-static) entry on given port */
-static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
-					 u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
+					u32 dat1, int portmap, void *ctx)
 {
 	struct del_port_learned_ctx *del_ctx = ctx;
 	int port = del_ctx->port;
 
 	if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
-		return;
+		return 0;
 
 	/* learned entries has only one port, we can just delete */
 	dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
 	lan9303_alr_make_entry_raw(chip, dat0, dat1);
+
+	return 0;
 }
 
 struct port_fdb_dump_ctx {
@@ -626,19 +632,19 @@ struct port_fdb_dump_ctx {
 	dsa_fdb_dump_cb_t *cb;
 };
 
-static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
-				      u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
+				     u32 dat1, int portmap, void *ctx)
 {
 	struct port_fdb_dump_ctx *dump_ctx = ctx;
 	u8 mac[ETH_ALEN];
 	bool is_static;
 
 	if ((BIT(dump_ctx->port) & portmap) == 0)
-		return;
+		return 0;
 
 	alr_reg_to_mac(dat0, dat1, mac);
 	is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
-	dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
+	return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
 }
 
 /* Set a static ALR entry. Delete entry if port_map is zero */
@@ -1210,9 +1216,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
 	};
 
 	dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
-	lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
-
-	return 0;
+	return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
 }
 
 static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
@@ -1404,11 +1404,17 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
 		addr[1] = mac_bridge.key[2] & 0xff;
 		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
 		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
-			if (mac_bridge.val[0] & BIT(port))
-				cb(addr, 0, true, data);
+			if (mac_bridge.val[0] & BIT(port)) {
+				err = cb(addr, 0, true, data);
+				if (err)
+					return err;
+			}
 		} else {
-			if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
-				cb(addr, 0, false, data);
+			if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
+				err = cb(addr, 0, false, data);
+				if (err)
+					return err;
+			}
 		}
 	}
 	return 0;
@@ -687,8 +687,8 @@ static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
 	shifts = ksz8->shifts;
 
 	ksz8_r_table(dev, TABLE_VLAN, addr, &data);
-	addr *= dev->phy_port_cnt;
-	for (i = 0; i < dev->phy_port_cnt; i++) {
+	addr *= 4;
+	for (i = 0; i < 4; i++) {
 		dev->vlan_cache[addr + i].table[0] = (u16)data;
 		data >>= shifts[VLAN_TABLE];
 	}
@@ -702,7 +702,7 @@ static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
 	u64 buf;
 
 	data = (u16 *)&buf;
-	addr = vid / dev->phy_port_cnt;
+	addr = vid / 4;
 	index = vid & 3;
 	ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
 	*vlan = data[index];
@@ -716,7 +716,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
 	u64 buf;
 
 	data = (u16 *)&buf;
-	addr = vid / dev->phy_port_cnt;
+	addr = vid / 4;
 	index = vid & 3;
 	ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
 	data[index] = vlan;
@@ -1119,24 +1119,67 @@ static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
 	if (ksz_is_ksz88x3(dev))
 		return -ENOTSUPP;
 
+	/* Discard packets with VID not enabled on the switch */
 	ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
 
+	/* Discard packets with VID not enabled on the ingress port */
+	for (port = 0; port < dev->phy_port_cnt; ++port)
+		ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
+			     flag);
+
 	return 0;
 }
 
+static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
+{
+	if (ksz_is_ksz88x3(dev)) {
+		ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
+			0x03 << (4 - 2 * port), state);
+	} else {
+		ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
+	}
+}
+
 static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
 			      const struct switchdev_obj_port_vlan *vlan,
 			      struct netlink_ext_ack *extack)
 {
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 	struct ksz_device *dev = ds->priv;
+	struct ksz_port *p = &dev->ports[port];
 	u16 data, new_pvid = 0;
 	u8 fid, member, valid;
 
 	if (ksz_is_ksz88x3(dev))
 		return -ENOTSUPP;
 
-	ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+	/* If a VLAN is added with untagged flag different from the
+	 * port's Remove Tag flag, we need to change the latter.
+	 * Ignore VID 0, which is always untagged.
+	 * Ignore CPU port, which will always be tagged.
+	 */
+	if (untagged != p->remove_tag && vlan->vid != 0 &&
+	    port != dev->cpu_port) {
+		unsigned int vid;
+
+		/* Reject attempts to add a VLAN that requires the
+		 * Remove Tag flag to be changed, unless there are no
+		 * other VLANs currently configured.
+		 */
+		for (vid = 1; vid < dev->num_vlans; ++vid) {
+			/* Skip the VID we are going to add or reconfigure */
+			if (vid == vlan->vid)
+				continue;
+
+			ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
				       &fid, &member, &valid);
+			if (valid && (member & BIT(port)))
+				return -EINVAL;
+		}
+
+		ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+		p->remove_tag = untagged;
+	}
 
 	ksz8_r_vlan_table(dev, vlan->vid, &data);
 	ksz8_from_vlan(dev, data, &fid, &member, &valid);
@@ -1160,9 +1203,11 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
 		u16 vid;
 
 		ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
-		vid &= 0xfff;
+		vid &= ~VLAN_VID_MASK;
 		vid |= new_pvid;
 		ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+
+		ksz8_port_enable_pvid(dev, port, true);
 	}
 
 	return 0;
@@ -1171,9 +1216,8 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
 static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
 			      const struct switchdev_obj_port_vlan *vlan)
 {
-	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 	struct ksz_device *dev = ds->priv;
-	u16 data, pvid, new_pvid = 0;
+	u16 data, pvid;
 	u8 fid, member, valid;
 
 	if (ksz_is_ksz88x3(dev))
@@ -1182,8 +1226,6 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
 	ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
 	pvid = pvid & 0xFFF;
 
-	ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
-
 	ksz8_r_vlan_table(dev, vlan->vid, &data);
 	ksz8_from_vlan(dev, data, &fid, &member, &valid);
 
@@ -1195,14 +1237,11 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
 		valid = 0;
 	}
 
-	if (pvid == vlan->vid)
-		new_pvid = 1;
-
 	ksz8_to_vlan(dev, fid, member, valid, &data);
 	ksz8_w_vlan_table(dev, vlan->vid, data);
 
-	if (new_pvid != pvid)
-		ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
+	if (pvid == vlan->vid)
+		ksz8_port_enable_pvid(dev, port, false);
 
 	return 0;
 }
@@ -1435,6 +1474,9 @@ static int ksz8_setup(struct dsa_switch *ds)
 
 	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
 
+	if (!ksz_is_ksz88x3(dev))
+		ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
+
 	/* set broadcast storm protection 10% rate */
 	regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
 			   BROADCAST_STORM_RATE,
@@ -1717,6 +1759,16 @@ static int ksz8_switch_init(struct ksz_device *dev)
 	/* set the real number of ports */
 	dev->ds->num_ports = dev->port_cnt;
 
+	/* We rely on software untagging on the CPU port, so that we
+	 * can support both tagged and untagged VLANs
+	 */
+	dev->ds->untag_bridge_pvid = true;
+
+	/* VLAN filtering is partly controlled by the global VLAN
+	 * Enable flag
+	 */
+	dev->ds->vlan_filtering_is_global = true;
+
 	return 0;
 }
 
@@ -631,6 +631,10 @@
 #define REG_PORT_4_OUT_RATE_3		0xEE
 #define REG_PORT_5_OUT_RATE_3		0xFE
 
+/* 88x3 specific */
+
+#define REG_SW_INSERT_SRC_PVID		0xC2
+
 /* PME */
 
 #define SW_PME_OUTPUT_ENABLE		BIT(1)
@@ -27,6 +27,7 @@ struct ksz_port_mib {
 struct ksz_port {
 	u16 member;
 	u16 vid_member;
+	bool remove_tag;		/* Remove Tag flag set, for ksz8795 only */
 	int stp_state;
 	struct phy_device phydev;
 
@@ -205,12 +206,8 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
 	int ret;
 
 	ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
-	if (!ret) {
-		/* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
-		value[0] = swab32(value[0]);
-		value[1] = swab32(value[1]);
-		*val = swab64((u64)*value);
-	}
+	if (!ret)
+		*val = (u64)value[0] << 32 | value[1];
 
 	return ret;
 }
@@ -47,6 +47,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
 	MIB_DESC(2, 0x48, "TxBytes"),
 	MIB_DESC(1, 0x60, "RxDrop"),
 	MIB_DESC(1, 0x64, "RxFiltering"),
+	MIB_DESC(1, 0x68, "RxUnicast"),
 	MIB_DESC(1, 0x6c, "RxMulticast"),
 	MIB_DESC(1, 0x70, "RxBroadcast"),
 	MIB_DESC(1, 0x74, "RxAlignErr"),
@@ -101,6 +101,23 @@
 	 AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
 	 AR9331_SW_PORT_STATUS_SPEED_M)
 
+#define AR9331_SW_REG_PORT_CTRL(_port)			(0x104 + (_port) * 0x100)
+#define AR9331_SW_PORT_CTRL_HEAD_EN			BIT(11)
+#define AR9331_SW_PORT_CTRL_PORT_STATE			GENMASK(2, 0)
+#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED		0
+#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING		1
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING	2
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING		3
+#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD		4
+
+#define AR9331_SW_REG_PORT_VLAN(_port)			(0x108 + (_port) * 0x100)
+#define AR9331_SW_PORT_VLAN_8021Q_MODE			GENMASK(31, 30)
+#define AR9331_SW_8021Q_MODE_SECURE			3
+#define AR9331_SW_8021Q_MODE_CHECK			2
+#define AR9331_SW_8021Q_MODE_FALLBACK			1
+#define AR9331_SW_8021Q_MODE_NONE			0
+#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER		GENMASK(25, 16)
+
 /* MIB registers */
 #define AR9331_MIB_COUNTER(x)			(0x20000 + ((x) * 0x100))
 
@@ -371,11 +388,59 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
 	return 0;
 }
 
+static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
+{
+	struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+	struct regmap *regmap = priv->regmap;
+	u32 port_mask, port_ctrl, val;
+	int ret;
+
+	/* Generate default port settings */
+	port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE,
+			       AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD);
+
+	if (dsa_is_cpu_port(ds, port)) {
+		/* CPU port should be allowed to communicate with all user
+		 * ports.
+		 */
+		port_mask = dsa_user_ports(ds);
+		/* Enable Atheros header on CPU port. This will allow us
+		 * communicate with each port separately
+		 */
+		port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN;
+	} else if (dsa_is_user_port(ds, port)) {
+		/* User ports should communicate only with the CPU port.
+		 */
+		port_mask = BIT(dsa_upstream_port(ds, port));
+	} else {
+		/* Other ports do not need to communicate at all */
+		port_mask = 0;
+	}
+
+	val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE,
+			 AR9331_SW_8021Q_MODE_NONE) |
+	      FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask);
+
+	ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val);
+	if (ret)
+		goto error;
+
+	ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl);
+	if (ret)
+		goto error;
+
+	return 0;
+error:
+	dev_err(priv->dev, "%s: error: %i\n", __func__, ret);
+
+	return ret;
+}
+
 static int ar9331_sw_setup(struct dsa_switch *ds)
 {
 	struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
 	struct regmap *regmap = priv->regmap;
-	int ret;
+	int ret, i;
 
 	ret = ar9331_sw_reset(priv);
 	if (ret)
@@ -402,6 +467,12 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
 	if (ret)
 		goto error;
 
+	for (i = 0; i < ds->num_ports; i++) {
+		ret = ar9331_sw_setup_port(ds, i);
+		if (ret)
+			goto error;
+	}
+
 	ds->configure_vlan_while_not_filtering = false;
 
 	return 0;
@@ -1635,7 +1635,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
 		/* We need to hide the dsa_8021q VLANs from the user. */
 		if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
 			l2_lookup.vlanid = 0;
-		cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+		rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+		if (rc)
+			return rc;
 	}
 	return 0;
 }
@@ -3185,6 +3187,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
 	}
 
 	sja1105_devlink_teardown(ds);
+	sja1105_mdiobus_unregister(ds);
 	sja1105_flower_teardown(ds);
 	sja1105_tas_teardown(ds);
 	sja1105_ptp_clock_unregister(ds);
@@ -426,7 +426,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
 	    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
-		if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
+		if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
+				    &ptp->tx_hdr_off)) {
+			if (vlan_tag_flags)
+				ptp->tx_hdr_off += VLAN_HLEN;
 			lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		} else {
@@ -368,6 +368,7 @@ struct cmd_nums {
 	#define HWRM_FUNC_PTP_TS_QUERY                    0x19fUL
 	#define HWRM_FUNC_PTP_EXT_CFG                     0x1a0UL
 	#define HWRM_FUNC_PTP_EXT_QCFG                    0x1a1UL
+	#define HWRM_FUNC_KEY_CTX_ALLOC                   0x1a2UL
 	#define HWRM_SELFTEST_QLIST                       0x200UL
 	#define HWRM_SELFTEST_EXEC                        0x201UL
 	#define HWRM_SELFTEST_IRQ                         0x202UL
@@ -531,8 +532,8 @@ struct hwrm_err_output {
 #define HWRM_VERSION_MAJOR 1
 #define HWRM_VERSION_MINOR 10
 #define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 47
-#define HWRM_VERSION_STR "1.10.2.47"
+#define HWRM_VERSION_RSVD 52
+#define HWRM_VERSION_STR "1.10.2.52"
 
 /* hwrm_ver_get_input (size:192b/24B) */
 struct hwrm_ver_get_input {
@@ -585,6 +586,7 @@ struct hwrm_ver_get_output {
 	#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED     0x1000UL
 	#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED             0x2000UL
 	#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED           0x4000UL
+	#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE             0x8000UL
 	u8	roce_fw_maj_8b;
 	u8	roce_fw_min_8b;
 	u8	roce_fw_bld_8b;
@@ -886,7 +888,8 @@ struct hwrm_async_event_cmpl_reset_notify {
 	#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL     (0x2UL << 8)
 	#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
 	#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET             (0x4UL << 8)
-	#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST                  ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
+	#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION          (0x5UL << 8)
+	#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST                  ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
 	#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK          0xffff0000UL
 	#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT           16
 };
@@ -1236,13 +1239,14 @@ struct hwrm_async_event_cmpl_error_report_base {
 	u8	timestamp_lo;
 	__le16	timestamp_hi;
 	__le32	event_data1;
-	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK           0xffUL
-	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT            0
-	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED       0x0UL
-	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM    0x1UL
-	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
-	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM            0x3UL
-	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST          ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM
+	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK                     0xffUL
+	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT                      0
+	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED                 0x0UL
+	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM              0x1UL
+	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL           0x2UL
+	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM                      0x3UL
+	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD  0x4UL
+	#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST                    ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
 };
 
 /* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1446,6 +1450,8 @@ struct hwrm_func_vf_cfg_input {
 	#define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS         0x200UL
 	#define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS     0x400UL
 	#define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS  0x800UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS   0x1000UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS   0x2000UL
 	__le16	mtu;
 	__le16	guest_vlan;
 	__le16	async_event_cr;
@@ -1469,7 +1475,8 @@ struct hwrm_func_vf_cfg_input {
 	__le16	num_vnics;
 	__le16	num_stat_ctxs;
 	__le16	num_hw_ring_grps;
-	u8	unused_0[4];
+	__le16	num_tx_key_ctxs;
+	__le16	num_rx_key_ctxs;
 };
 
 /* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -1493,7 +1500,7 @@ struct hwrm_func_qcaps_input {
 	u8	unused_0[6];
 };
 
-/* hwrm_func_qcaps_output (size:704b/88B) */
+/* hwrm_func_qcaps_output (size:768b/96B) */
 struct hwrm_func_qcaps_output {
 	__le16	error_code;
 	__le16	req_type;
@@ -1587,7 +1594,8 @@ struct hwrm_func_qcaps_output {
 	#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA   0x4UL
 	#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA   0x8UL
 	#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE  0x10UL
-	u8	unused_1;
+	__le16	max_key_ctxs_alloc;
+	u8	unused_1[7];
 	u8	valid;
 };
 
@@ -1602,7 +1610,7 @@ struct hwrm_func_qcfg_input {
 	u8	unused_0[6];
 };
 
-/* hwrm_func_qcfg_output (size:832b/104B) */
+/* hwrm_func_qcfg_output (size:896b/112B) */
 struct hwrm_func_qcfg_output {
 	__le16	error_code;
 	__le16	req_type;
@@ -1749,11 +1757,13 @@ struct hwrm_func_qcfg_output {
 	#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
 	#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
 	__le16	host_mtu;
-	u8	unused_3;
+	__le16	alloc_tx_key_ctxs;
+	__le16	alloc_rx_key_ctxs;
+	u8	unused_3[5];
 	u8	valid;
 };
 
-/* hwrm_func_cfg_input (size:832b/104B) */
+/* hwrm_func_cfg_input (size:896b/112B) */
 struct hwrm_func_cfg_input {
 	__le16	req_type;
 	__le16	cmpl_ring;
@@ -1820,6 +1830,8 @@ struct hwrm_func_cfg_input {
 	#define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW     0x8000000UL
 	#define FUNC_CFG_REQ_ENABLES_TPID                 0x10000000UL
 	#define FUNC_CFG_REQ_ENABLES_HOST_MTU             0x20000000UL
+	#define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS          0x40000000UL
+	#define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS          0x80000000UL
 	__le16	admin_mtu;
 	__le16	mru;
 	__le16	num_rsscos_ctxs;
@@ -1929,6 +1941,9 @@ struct hwrm_func_cfg_input {
 	#define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
 	__be16	tpid;
 	__le16	host_mtu;
+	__le16	num_tx_key_ctxs;
+	__le16	num_rx_key_ctxs;
+	u8	unused_0[4];
 };
 
 /* hwrm_func_cfg_output (size:128b/16B) */
@@ -2099,6 +2114,7 @@ struct hwrm_func_drv_rgtr_input {
 	#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT                     0x40UL
 	#define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT                 0x80UL
 	#define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT       0x100UL
+	#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT                   0x200UL
 	__le32	enables;
 	#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE             0x1UL
 	#define FUNC_DRV_RGTR_REQ_ENABLES_VER                 0x2UL
@@ -2268,7 +2284,7 @@ struct hwrm_func_resource_qcaps_input {
 	u8	unused_0[6];
 };
 
-/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+/* hwrm_func_resource_qcaps_output (size:512b/64B) */
 struct hwrm_func_resource_qcaps_output {
 	__le16	error_code;
 	__le16	req_type;
@@ -2300,11 +2316,15 @@ struct hwrm_func_resource_qcaps_output {
 	__le16	max_tx_scheduler_inputs;
 	__le16	flags;
 	#define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED     0x1UL
+	__le16	min_tx_key_ctxs;
+	__le16	max_tx_key_ctxs;
+	__le16	min_rx_key_ctxs;
+	__le16	max_rx_key_ctxs;
 	u8	unused_0[5];
 	u8	valid;
 };
 
-/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
 struct hwrm_func_vf_resource_cfg_input {
 	__le16	req_type;
 	__le16	cmpl_ring;
@@ -2331,6 +2351,10 @@ struct hwrm_func_vf_resource_cfg_input {
 	__le16	max_hw_ring_grps;
 	__le16	flags;
 	#define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED     0x1UL
+	__le16	min_tx_key_ctxs;
+	__le16	max_tx_key_ctxs;
+	__le16	min_rx_key_ctxs;
+	__le16	max_rx_key_ctxs;
 	u8	unused_0[2];
 };
 
@@ -2348,7 +2372,9 @@ struct hwrm_func_vf_resource_cfg_output {
 	__le16	reserved_vnics;
 	__le16	reserved_stat_ctx;
 	__le16	reserved_hw_ring_grps;
-	u8	unused_0[7];
+	__le16	reserved_tx_key_ctxs;
+	__le16	reserved_rx_key_ctxs;
+	u8	unused_0[3];
 	u8	valid;
 };
 
@@ -4220,7 +4246,7 @@ struct hwrm_port_lpbk_clr_stats_output {
 	u8	valid;
 };
 
-/* hwrm_port_ts_query_input (size:256b/32B) */
+/* hwrm_port_ts_query_input (size:320b/40B) */
struct hwrm_port_ts_query_input {
 	__le16	req_type;
 	__le16	cmpl_ring;
@@ -4238,8 +4264,11 @@ struct hwrm_port_ts_query_input {
 	__le16	enables;
 	#define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT     0x1UL
 	#define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID         0x2UL
+	#define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET     0x4UL
 	__le16	ts_req_timeout;
 	__le32	ptp_seq_id;
+	__le16	ptp_hdr_offset;
+	u8	unused_1[6];
 };
 
 /* hwrm_port_ts_query_output (size:192b/24B) */
@@ -8172,6 +8201,7 @@ struct hwrm_fw_reset_input {
 	u8	host_idx;
 	u8	flags;
 	#define FW_RESET_REQ_FLAGS_RESET_GRACEFUL     0x1UL
+	#define FW_RESET_REQ_FLAGS_FW_ACTIVATION      0x2UL
 	u8	unused_0[4];
 };
 
@@ -8952,7 +8982,7 @@ struct hwrm_nvm_get_dir_info_output {
 	u8	valid;
 };
 
-/* hwrm_nvm_write_input (size:384b/48B) */
+/* hwrm_nvm_write_input (size:448b/56B) */
 struct hwrm_nvm_write_input {
 	__le16	req_type;
 	__le16	cmpl_ring;
@@ -8968,7 +8998,11 @@ struct hwrm_nvm_write_input {
 	__le16	option;
 	__le16	flags;
 	#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG     0x1UL
+	#define NVM_WRITE_REQ_FLAGS_BATCH_MODE               0x2UL
+	#define NVM_WRITE_REQ_FLAGS_BATCH_LAST               0x4UL
 	__le32	dir_item_length;
+	__le32	offset;
+	__le32	len;
 	__le32	unused_0;
 };
 
@@ -20,7 +20,7 @@
 #include "bnxt.h"
 #include "bnxt_ptp.h"
 
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
 {
 	unsigned int ptp_class;
 	struct ptp_header *hdr;
@@ -34,6 +34,7 @@ int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
 		if (!hdr)
 			return -EINVAL;
 
+		*hdr_off = (u8 *)hdr - skb->data;
 		*seq_id	 = ntohs(hdr->sequence_id);
 		return 0;
 	default:
@@ -91,6 +92,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
 	    PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
 		req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
 		req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+		req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
 		req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
 	}
 	mutex_lock(&bp->hwrm_cmd_lock);
@@ -10,8 +10,8 @@
 #ifndef BNXT_PTP_H
 #define BNXT_PTP_H
 
-#define BNXT_PTP_GRC_WIN	5
-#define BNXT_PTP_GRC_WIN_BASE	0x5000
+#define BNXT_PTP_GRC_WIN	6
+#define BNXT_PTP_GRC_WIN_BASE	0x6000
 
 #define BNXT_MAX_PHC_DRIFT	31000000
 #define BNXT_LO_TIMER_MASK	0x0000ffffffffUL
@@ -19,7 +19,8 @@
 
 #define BNXT_PTP_QTS_TIMEOUT	1000
 #define BNXT_PTP_QTS_TX_ENABLES	(PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID |	\
-				 PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT)
+				 PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
+				 PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
 
 struct bnxt_ptp_cfg {
 	struct ptp_clock_info	ptp_info;
@@ -37,6 +38,7 @@ struct bnxt_ptp_cfg {
 	#define BNXT_PHC_OVERFLOW_PERIOD	(19 * 3600 * HZ)
 
 	u16			tx_seqid;
+	u16			tx_hdr_off;
 	struct bnxt		*bp;
 	atomic_t		tx_avail;
 #define BNXT_MAX_TX_TS	1
@@ -74,7 +76,7 @@ do { \
 	((dst) = READ_ONCE(src))
 #endif
 
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id);
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
 int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
 int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
 int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
@@ -1506,11 +1506,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
 
 	iavf_map_rings_to_vectors(adapter);
-
-	if (RSS_AQ(adapter))
-		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
-	else
-		err = iavf_init_rss(adapter);
 err:
 	return err;
 }
@@ -2200,6 +2195,14 @@ static void iavf_reset_task(struct work_struct *work)
 			goto reset_err;
 	}
 
+	if (RSS_AQ(adapter)) {
+		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
+	} else {
+		err = iavf_init_rss(adapter);
+		if (err)
+			goto reset_err;
+	}
+
 	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
 
@@ -234,6 +234,7 @@ enum ice_pf_state {
 	ICE_VFLR_EVENT_PENDING,
 	ICE_FLTR_OVERFLOW_PROMISC,
 	ICE_VF_DIS,
+	ICE_VF_DEINIT_IN_PROGRESS,
 	ICE_CFG_BUSY,
 	ICE_SERVICE_SCHED,
 	ICE_SERVICE_DIS,
@@ -191,6 +191,14 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
 
+	/* Under some circumstances, we might receive a request to delete our
+	 * own device address from our uc list. Because we store the device
+	 * address in the VSI's MAC filter list, we need to ignore such
+	 * requests and not delete our device address from this list.
+	 */
+	if (ether_addr_equal(addr, netdev->dev_addr))
+		return 0;
+
 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
 				     ICE_FWD_TO_VSI))
 		return -EINVAL;
@@ -4194,6 +4202,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 	struct ice_hw *hw;
 	int i, err;
 
+	if (pdev->is_virtfn) {
+		dev_err(dev, "can't probe a virtual function\n");
+		return -EINVAL;
+	}
+
 	/* this driver uses devres, see
 	 * Documentation/driver-api/driver-model/devres.rst
 	 */
@@ -5119,7 +5132,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
 		return -EADDRNOTAVAIL;
 
 	if (ether_addr_equal(netdev->dev_addr, mac)) {
-		netdev_warn(netdev, "already using mac %pM\n", mac);
+		netdev_dbg(netdev, "already using mac %pM\n", mac);
 		return 0;
 	}
 
@@ -5130,6 +5143,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
 		return -EBUSY;
 	}
 
+	netif_addr_lock_bh(netdev);
 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
 	status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
@@ -5139,30 +5153,28 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
 	/* Add filter for new MAC. If filter exists, return success */
 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
-	if (status == ICE_ERR_ALREADY_EXISTS) {
+	if (status == ICE_ERR_ALREADY_EXISTS)
 		/* Although this MAC filter is already present in hardware it's
 		 * possible in some cases (e.g. bonding) that dev_addr was
 		 * modified outside of the driver and needs to be restored back
 		 * to this value.
 		 */
-		memcpy(netdev->dev_addr, mac, netdev->addr_len);
 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
-		return 0;
-	}
-
-	/* error if the new filter addition failed */
-	if (status)
+	else if (status)
+		/* error if the new filter addition failed */
 		err = -EADDRNOTAVAIL;
 
 err_update_filters:
 	if (err) {
 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
 			   mac);
+		netif_addr_unlock_bh(netdev);
 		return err;
 	}
 
 	/* change the netdev's MAC address */
 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
+	netif_addr_unlock_bh(netdev);
 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
 		   netdev->dev_addr);
 
 	/* write new MAC address to the firmware */
@@ -615,6 +615,8 @@ void ice_free_vfs(struct ice_pf *pf)
 	struct ice_hw *hw = &pf->hw;
 	unsigned int tmp, i;
 
+	set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
+
 	if (!pf->vf)
 		return;
 
@@ -680,6 +682,7 @@ void ice_free_vfs(struct ice_pf *pf)
 			i);
 
 	clear_bit(ICE_VF_DIS, pf->state);
+	clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
 	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
 }
 
@@ -4415,6 +4418,10 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
 	struct device *dev;
 	int err = 0;
 
+	/* if de-init is underway, don't process messages from VF */
+	if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
+		return;
+
 	dev = ice_pf_to_dev(pf);
 	if (ice_validate_vf_id(pf, vf_id)) {
 		err = -EINVAL;
@@ -938,7 +938,7 @@ enum mvpp22_ptp_packet_format {
 #define MVPP2_BM_COOKIE_POOL_OFFS	8
 #define MVPP2_BM_COOKIE_CPU_OFFS	24
 
-#define MVPP2_BM_SHORT_FRAME_SIZE	704	/* frame size 128 */
+#define MVPP2_BM_SHORT_FRAME_SIZE	736	/* frame size 128 */
 #define MVPP2_BM_LONG_FRAME_SIZE	2240	/* frame size 1664 */
 #define MVPP2_BM_JUMBO_FRAME_SIZE	10432	/* frame size 9856 */
 /* BM short pool packet size
@@ -748,7 +748,7 @@ static void
 prestera_fdb_offload_notify(struct prestera_port *port,
 			    struct switchdev_notifier_fdb_info *info)
 {
-	struct switchdev_notifier_fdb_info send_info;
+	struct switchdev_notifier_fdb_info send_info = {};
 
 	send_info.addr = info->addr;
 	send_info.vid = info->vid;
@@ -1123,7 +1123,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
 static void prestera_fdb_event(struct prestera_switch *sw,
 			       struct prestera_event *evt, void *arg)
 {
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};
 	struct net_device *dev = NULL;
 	struct prestera_port *port;
 	struct prestera_lag *lag;
@@ -134,6 +134,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 		      cq->cqn);
 
 	cq->uar = dev->priv.uar;
+	cq->irqn = eq->core.irqn;
 
 	return 0;
 
@@ -1019,12 +1019,19 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
 	MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
 	mlx5_eq_notifier_register(dev, &tracer->nb);
 
-	mlx5_fw_tracer_start(tracer);
-
+	err = mlx5_fw_tracer_start(tracer);
+	if (err) {
+		mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
+		goto err_notifier_unregister;
+	}
 	return 0;
 
+err_notifier_unregister:
+	mlx5_eq_notifier_unregister(dev, &tracer->nb);
+	mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
 err_dealloc_pd:
 	mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+	cancel_work_sync(&tracer->read_fw_strings_work);
 	return err;
 }
 
@@ -124,6 +124,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
 	if (IS_ERR(rt))
 		return PTR_ERR(rt);
 
+	if (rt->rt_type != RTN_UNICAST) {
+		ret = -ENETUNREACH;
+		goto err_rt_release;
+	}
+
 	if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
 		ret = -ENETUNREACH;
 		goto err_rt_release;
@@ -1535,15 +1535,9 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_core_cq *mcq = &cq->mcq;
-	int eqn_not_used;
-	unsigned int irqn;
 	int err;
 	u32 i;
 
-	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-	if (err)
-		return err;
-
 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
 			       &cq->wq_ctrl);
 	if (err)
@@ -1557,7 +1551,6 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
 	mcq->vector     = param->eq_ix;
 	mcq->comp       = mlx5e_completion_event;
 	mcq->event      = mlx5e_cq_error_event;
-	mcq->irqn       = irqn;
 
 	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1605,11 +1598,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	void *in;
 	void *cqc;
 	int inlen;
-	unsigned int irqn_not_used;
 	int eqn;
 	int err;
 
-	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
 	if (err)
 		return err;
 
@@ -1891,30 +1883,30 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 	if (err)
 		goto err_close_icosq;
 
+	err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
+	if (err)
+		goto err_close_sqs;
+
 	if (c->xdp) {
 		err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
 				       &c->rq_xdpsq, false);
 		if (err)
-			goto err_close_sqs;
+			goto err_close_rq;
 	}
 
-	err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
-	if (err)
-		goto err_close_xdp_sq;
-
 	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
 	if (err)
-		goto err_close_rq;
+		goto err_close_xdp_sq;
 
 	return 0;
 
-err_close_rq:
-	mlx5e_close_rq(&c->rq);
-
 err_close_xdp_sq:
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq_xdpsq);
 
+err_close_rq:
+	mlx5e_close_rq(&c->rq);
+
 err_close_sqs:
 	mlx5e_close_sqs(c);
 
@@ -1949,9 +1941,9 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 static void mlx5e_close_queues(struct mlx5e_channel *c)
 {
 	mlx5e_close_xdpsq(&c->xdpsq);
-	mlx5e_close_rq(&c->rq);
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq_xdpsq);
+	mlx5e_close_rq(&c->rq);
 	mlx5e_close_sqs(c);
 	mlx5e_close_icosq(&c->icosq);
 	mlx5e_close_icosq(&c->async_icosq);
@@ -1983,9 +1975,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	struct mlx5e_channel *c;
 	unsigned int irq;
 	int err;
-	int eqn;
 
-	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+	err = mlx5_vector2irqn(priv->mdev, ix, &irq);
 	if (err)
 		return err;
 
@@ -855,8 +855,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	return err;
 }
 
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-		    unsigned int *irqn)
+static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+			  unsigned int *irqn)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
 	struct mlx5_eq_comp *eq, *n;
@@ -865,8 +865,10 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 
 	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
 		if (i++ == vector) {
-			*eqn = eq->core.eqn;
-			*irqn = eq->core.irqn;
+			if (irqn)
+				*irqn = eq->core.irqn;
+			if (eqn)
+				*eqn = eq->core.eqn;
 			err = 0;
 			break;
 		}
@@ -874,8 +876,18 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 
 	return err;
 }
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
+{
+	return vector2eqnirqn(dev, vector, eqn, NULL);
+}
 EXPORT_SYMBOL(mlx5_vector2eqn);
 
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+{
+	return vector2eqnirqn(dev, vector, NULL, irqn);
+}
+
 unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
 {
 	return dev->priv.eq_table->num_comp_eqs;
@@ -69,7 +69,7 @@ static void
 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
 				   unsigned long val)
 {
-	struct switchdev_notifier_fdb_info send_info;
+	struct switchdev_notifier_fdb_info send_info = {};
 
 	send_info.addr = addr;
 	send_info.vid = vid;
@@ -579,7 +579,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
 	xa_init(&bridge->vports);
 	bridge->ifindex = ifindex;
 	bridge->refcnt = 1;
-	bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
+	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
 	list_add(&bridge->list, &br_offloads->bridges);
 
 	return bridge;
@@ -1006,7 +1006,7 @@ int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswit
 	if (!vport->bridge)
 		return -EINVAL;
 
-	vport->bridge->ageing_time = ageing_time;
+	vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
 	return 0;
 }
 
@@ -501,6 +501,7 @@ mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample,
 err_offload_rule:
 	mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
 err_default_tbl:
 	kfree(sample_flow);
 	return ERR_PTR(err);
 }
@@ -48,6 +48,7 @@
 #include "lib/fs_chains.h"
 #include "en_tc.h"
 #include "en/mapping.h"
+#include "devlink.h"
 
 #define mlx5_esw_for_each_rep(esw, i, rep) \
 	xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -3001,12 +3002,19 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
 	if (cur_mlx5_mode == mlx5_mode)
 		goto unlock;
 
-	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+		if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Can't change mode while devlink traps are active");
+			err = -EOPNOTSUPP;
+			goto unlock;
+		}
 		err = esw_offloads_start(esw, extack);
-	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+	} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
 		err = esw_offloads_stop(esw, extack);
-	else
+	} else {
 		err = -EINVAL;
+	}
 
 unlock:
 	mlx5_esw_unlock(esw);
@@ -417,7 +417,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	struct mlx5_wq_param wqp;
 	struct mlx5_cqe64 *cqe;
 	int inlen, err, eqn;
-	unsigned int irqn;
 	void *cqc, *in;
 	__be64 *pas;
 	u32 i;
@@ -446,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 		goto err_cqwq;
 	}
 
-	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
 	if (err) {
 		kvfree(in);
 		goto err_cqwq;
@@ -476,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	*conn->cq.mcq.arm_db = 0;
 	conn->cq.mcq.vector = 0;
 	conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
-	conn->cq.mcq.irqn = irqn;
 	conn->cq.mcq.uar = fdev->conn_res.uar;
 	tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
 
@@ -104,4 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
 #endif
 
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
+
 #endif
@@ -1784,16 +1784,14 @@ static int __init init(void)
 	if (err)
		goto err_sf;

-#ifdef CONFIG_MLX5_CORE_EN
 	err = mlx5e_init();
-	if (err) {
-		pci_unregister_driver(&mlx5_core_driver);
-		goto err_debug;
-	}
-#endif
+	if (err)
+		goto err_en;

 	return 0;

+err_en:
+	mlx5_sf_driver_unregister();
 err_sf:
	pci_unregister_driver(&mlx5_core_driver);
 err_debug:

@@ -1803,9 +1801,7 @@ static int __init init(void)
 static void __exit cleanup(void)
 {
-#ifdef CONFIG_MLX5_CORE_EN
 	mlx5e_cleanup();
-#endif
 	mlx5_sf_driver_unregister();
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();

@@ -206,8 +206,13 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
 int mlx5_fw_version_query(struct mlx5_core_dev *dev,
			  u32 *running_ver, u32 *stored_ver);

+#ifdef CONFIG_MLX5_CORE_EN
 int mlx5e_init(void);
 void mlx5e_cleanup(void);
+#else
+static inline int mlx5e_init(void){ return 0; }
+static inline void mlx5e_cleanup(void){}
+#endif

 static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
 {

@@ -214,6 +214,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 		err = -ENOMEM;
		goto err_cpumask;
	}
+	irq->pool = pool;
 	kref_init(&irq->kref);
 	irq->index = i;
 	err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));

@@ -222,7 +223,6 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 			irq->index, err);
		goto err_xa;
	}
-	irq->pool = pool;
 	return irq;
 err_xa:
	free_cpumask_var(irq->mask);

@@ -251,8 +251,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 {
+	int err = 0;
+
+	err = atomic_notifier_chain_unregister(&irq->nh, nb);
 	irq_put(irq);
-	return atomic_notifier_chain_unregister(&irq->nh, nb);
+	return err;
 }

 struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)

@@ -437,6 +440,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
 	if (!pool)
		return ERR_PTR(-ENOMEM);
 	pool->dev = dev;
+	mutex_init(&pool->lock);
 	xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
 	pool->xa_num_irqs.min = start;
 	pool->xa_num_irqs.max = start + size - 1;

@@ -445,7 +449,6 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
 		 name);
 	pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
 	pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
-	mutex_init(&pool->lock);
 	mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
		      name, size, start);
 	return pool;

@@ -459,6 +462,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
 	xa_for_each(&pool->irqs, index, irq)
		irq_release(&irq->kref);
 	xa_destroy(&pool->irqs);
+	mutex_destroy(&pool->lock);
 	kvfree(pool);
 }

@@ -749,7 +749,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 	struct mlx5_cqe64 *cqe;
 	struct mlx5dr_cq *cq;
 	int inlen, err, eqn;
-	unsigned int irqn;
 	void *cqc, *in;
 	__be64 *pas;
 	int vector;

@@ -782,7 +781,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 		goto err_cqwq;

 	vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
-	err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
+	err = mlx5_vector2eqn(mdev, vector, &eqn);
 	if (err) {
		kvfree(in);
		goto err_cqwq;

@@ -818,7 +817,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 	*cq->mcq.arm_db = cpu_to_be32(2 << 28);

 	cq->mcq.vector = 0;
-	cq->mcq.irqn = irqn;
 	cq->mcq.uar = uar;

 	return cq;

@@ -352,6 +352,7 @@ static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
 {
 	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_DECAP);
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
 }

 static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)

@@ -365,6 +366,7 @@ static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
 	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_L3_DECAP);
 	MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
 }

 static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,

@@ -9079,7 +9079,7 @@ mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
 {
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};
 	struct net_device *dev;

 	dev = br_fdb_find_port(rif->dev, mac, 0);

@@ -9127,8 +9127,8 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
 {
+	struct switchdev_notifier_fdb_info info = {};
 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
-	struct switchdev_notifier_fdb_info info;
 	struct net_device *br_dev;
 	struct net_device *dev;

@@ -2508,7 +2508,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
 			    const char *mac, u16 vid,
			    struct net_device *dev, bool offloaded)
 {
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};

 	info.addr = mac;
 	info.vid = vid;

@@ -277,7 +277,7 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
 				      const char *mac, u16 vid,
				      struct net_device *dev, bool offloaded)
 {
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};

 	info.addr = mac;
 	info.vid = vid;

@@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
 		    ocelot->map[target][reg & REG_MASK] + offset, &val);
 	return val;
 }
-EXPORT_SYMBOL(__ocelot_read_ix);
+EXPORT_SYMBOL_GPL(__ocelot_read_ix);

 void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
 {

@@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
 	regmap_write(ocelot->targets[target],
		     ocelot->map[target][reg & REG_MASK] + offset, val);
 }
-EXPORT_SYMBOL(__ocelot_write_ix);
+EXPORT_SYMBOL_GPL(__ocelot_write_ix);

 void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
		     u32 offset)

@@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
 		    ocelot->map[target][reg & REG_MASK] + offset,
		    mask, val);
 }
-EXPORT_SYMBOL(__ocelot_rmw_ix);
+EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);

 u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
 {

@@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
 	regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
 	return val;
 }
-EXPORT_SYMBOL(ocelot_port_readl);
+EXPORT_SYMBOL_GPL(ocelot_port_readl);

 void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 {

@@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 	regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
 }
-EXPORT_SYMBOL(ocelot_port_writel);
+EXPORT_SYMBOL_GPL(ocelot_port_writel);

 void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
 {

@@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
 	ocelot_port_writel(port, (cur & (~mask)) | val, reg);
 }
-EXPORT_SYMBOL(ocelot_port_rmwl);
+EXPORT_SYMBOL_GPL(ocelot_port_rmwl);

 u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
			    u32 reg, u32 offset)

@@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot,
 	return 0;
 }
-EXPORT_SYMBOL(ocelot_regfields_init);
+EXPORT_SYMBOL_GPL(ocelot_regfields_init);

 static struct regmap_config ocelot_regmap_config = {
	.reg_bits = 32,

@@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
 	return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
 }
-EXPORT_SYMBOL(ocelot_regmap_init);
+EXPORT_SYMBOL_GPL(ocelot_regmap_init);

@@ -3502,12 +3502,16 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
 	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);

+	/* The default value is 0x13. Change it to 0x2f */
+	rtl_csi_access_enable(tp, 0x2f);
+
 	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);

 	/* disable EEE */
 	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);

 	rtl_pcie_state_l2l3_disable(tp);
+	rtl_hw_aspm_clkreq_enable(tp, true);
 }

 DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)

@@ -2715,7 +2715,7 @@ static void
 rocker_fdb_offload_notify(struct rocker_port *rocker_port,
			  struct switchdev_notifier_fdb_info *recv_info)
 {
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};

 	info.addr = recv_info->addr;
 	info.vid = recv_info->vid;

@@ -1822,7 +1822,7 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
 		container_of(work, struct ofdpa_fdb_learn_work, work);
 	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
 	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};

 	info.addr = lw->addr;
 	info.vid = lw->vid;

@@ -358,7 +358,7 @@ static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
 static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
					 struct switchdev_notifier_fdb_info *rcv)
 {
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};

 	info.addr = rcv->addr;
 	info.vid = rcv->vid;

@@ -920,7 +920,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 	struct cpdma_chan *txch;
 	int ret, q_idx;

-	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+	if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;

@@ -1100,7 +1100,7 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
 	for (i = 0; i < n; i++) {
		xdpf = frames[i];
-		if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+		if (xdpf->len < READ_ONCE(priv->tx_packet_min))
			break;

		if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))

@@ -1389,6 +1389,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
 		priv->dev = dev;
 		priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
 		priv->emac_port = i + 1;
+		priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;

 		if (is_valid_ether_addr(slave_data->mac_addr)) {
			ether_addr_copy(priv->mac_addr, slave_data->mac_addr);

@@ -1686,6 +1687,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 			priv = netdev_priv(sl_ndev);
			slave->port_vlan = vlan;
+			WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
			if (netif_running(sl_ndev))
				cpsw_port_add_switch_def_ale_entries(priv,
								     slave);

@@ -1714,6 +1716,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 			priv = netdev_priv(slave->ndev);
			slave->port_vlan = slave->data->dual_emac_res_vlan;
+			WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
			cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
		}

@@ -89,7 +89,8 @@ do { \
 #define CPSW_POLL_WEIGHT		64
 #define CPSW_RX_VLAN_ENCAP_HDR_SIZE	4
-#define CPSW_MIN_PACKET_SIZE		(VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE_VLAN	(VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE		(ETH_ZLEN)
 #define CPSW_MAX_PACKET_SIZE		(VLAN_ETH_FRAME_LEN +\
					 ETH_FCS_LEN +\
					 CPSW_RX_VLAN_ENCAP_HDR_SIZE)

@@ -380,6 +381,7 @@ struct cpsw_priv {
 	u32 emac_port;
 	struct cpsw_common *cpsw;
 	int offload_fwd_mark;
+	u32 tx_packet_min;
 };

 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)

@@ -368,7 +368,7 @@ static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
 static void cpsw_fdb_offload_notify(struct net_device *ndev,
				    struct switchdev_notifier_fdb_info *rcv)
 {
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};

 	info.addr = rcv->addr;
 	info.vid = rcv->vid;

@@ -418,7 +418,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
 	struct hwsim_edge *e;
 	u32 v0, v1;

-	if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+	if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
		return -EINVAL;

@@ -528,14 +528,14 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
 	u32 v0, v1;
 	u8 lqi;

-	if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+	if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
		return -EINVAL;

 	if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
		return -EINVAL;

-	if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
+	if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] ||
	    !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
		return -EINVAL;

@@ -1089,7 +1089,7 @@ struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
 	xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
 	if (!xpcs)
-		return NULL;
+		return ERR_PTR(-ENOMEM);

 	xpcs->mdiodev = mdiodev;

@@ -1760,8 +1760,6 @@ static struct phy_driver ksphy_driver[] = {
 	.name		= "Micrel KSZ87XX Switch",
	/* PHY_BASIC_FEATURES */
 	.config_init	= kszphy_config_init,
-	.config_aneg	= ksz8873mll_config_aneg,
-	.read_status	= ksz8873mll_read_status,
 	.match_phy_device = ksz8795_match_phy_device,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,

@@ -284,7 +284,7 @@ static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
 static int ppp_connect_channel(struct channel *pch, int unit);
 static int ppp_disconnect_channel(struct channel *pch);
 static void ppp_destroy_channel(struct channel *pch);
-static int unit_get(struct idr *p, void *ptr);
+static int unit_get(struct idr *p, void *ptr, int min);
 static int unit_set(struct idr *p, void *ptr, int n);
 static void unit_put(struct idr *p, int n);
 static void *unit_find(struct idr *p, int n);

@@ -1155,9 +1155,20 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
 	mutex_lock(&pn->all_ppp_mutex);

 	if (unit < 0) {
-		ret = unit_get(&pn->units_idr, ppp);
+		ret = unit_get(&pn->units_idr, ppp, 0);
		if (ret < 0)
			goto err;
+		if (!ifname_is_set) {
+			while (1) {
+				snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
+				if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
+					break;
+				unit_put(&pn->units_idr, ret);
+				ret = unit_get(&pn->units_idr, ppp, ret + 1);
+				if (ret < 0)
+					goto err;
+			}
+		}
 	} else {
		/* Caller asked for a specific unit number. Fail with -EEXIST
		 * if unavailable. For backward compatibility, return -EEXIST

@@ -1306,7 +1317,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
 	 * the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows
	 * userspace to infer the device name using to the PPPIOCGUNIT ioctl.
	 */
-	if (!tb[IFLA_IFNAME])
+	if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
		conf.ifname_is_set = false;

 	err = ppp_dev_configure(src_net, dev, &conf);

@@ -3552,9 +3563,9 @@ static int unit_set(struct idr *p, void *ptr, int n)
 }

 /* get new free unit number and associate pointer with it */
-static int unit_get(struct idr *p, void *ptr)
+static int unit_get(struct idr *p, void *ptr, int min)
 {
-	return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
+	return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
 }

 /* put unit number back to a pool */

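The ppp hunks above reserve a unit id first, derive the candidate name "ppp<id>", and on a name collision release the id and retry the allocation from id + 1. A minimal userspace sketch of that reserve-check-retry shape; reserved[] stands in for the idr and existing[] for the net namespace's device list, both hypothetical:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_UNITS 8

static bool reserved[MAX_UNITS];                 /* plays the idr */
static const char *existing[] = { "ppp0", "ppp2", NULL };  /* taken names */

static bool name_in_use(const char *name)
{
	for (int i = 0; existing[i]; i++)
		if (!strcmp(existing[i], name))
			return true;
	return false;
}

static int unit_get(int min)    /* like idr_alloc(): lowest free id >= min */
{
	for (int id = min; id < MAX_UNITS; id++)
		if (!reserved[id]) {
			reserved[id] = true;
			return id;
		}
	return -1;
}

int main(void)
{
	char name[16];
	int id = unit_get(0);

	while (id >= 0) {
		snprintf(name, sizeof(name), "ppp%d", id);
		if (!name_in_use(name))
			break;                  /* id and name both free */
		reserved[id] = false;           /* unit_put(): drop the loser */
		id = unit_get(id + 1);          /* retry strictly above it */
	}
	if (id >= 0)
		printf("allocated %s\n", name); /* prints "allocated ppp1" */
	return 0;
}

Retrying from id + 1 rather than 0 guarantees forward progress even when low-numbered names are permanently occupied.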
@@ -41,14 +41,14 @@ struct mhi_wwan_dev {
 /* Increment RX budget and schedule RX refill if necessary */
 static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
 {
-	spin_lock(&mhiwwan->rx_lock);
+	spin_lock_bh(&mhiwwan->rx_lock);

 	mhiwwan->rx_budget++;

 	if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
		schedule_work(&mhiwwan->rx_refill);

-	spin_unlock(&mhiwwan->rx_lock);
+	spin_unlock_bh(&mhiwwan->rx_lock);
 }

 /* Decrement RX budget if non-zero and return true on success */

@@ -56,7 +56,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
 {
 	bool ret = false;

-	spin_lock(&mhiwwan->rx_lock);
+	spin_lock_bh(&mhiwwan->rx_lock);

 	if (mhiwwan->rx_budget) {
		mhiwwan->rx_budget--;

@@ -64,7 +64,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
 		ret = true;
 	}

-	spin_unlock(&mhiwwan->rx_lock);
+	spin_unlock_bh(&mhiwwan->rx_lock);

 	return ret;
 }

@@ -130,9 +130,9 @@ static void mhi_wwan_ctrl_stop(struct wwan_port *port)
 {
 	struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);

-	spin_lock(&mhiwwan->rx_lock);
+	spin_lock_bh(&mhiwwan->rx_lock);
 	clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
-	spin_unlock(&mhiwwan->rx_lock);
+	spin_unlock_bh(&mhiwwan->rx_lock);

 	cancel_work_sync(&mhiwwan->rx_refill);

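The switch to the _bh lock variants above targets a classic self-deadlock shape: a lock taken in process context without disabling bottom halves can be re-acquired by a softirq that interrupts the holder on the same CPU. A loose userspace analogy, not the driver's actual call graph: a signal handler plays the softirq, and masking the signal around the critical section plays spin_lock_bh():

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int rx_budget;

static void fake_softirq(int sig)
{
	(void)sig;
	/* If this fired inside the critical section below while the signal
	 * were unmasked, locking here would hang the thread on its own lock.
	 */
	pthread_mutex_lock(&lock);
	rx_budget++;
	pthread_mutex_unlock(&lock);
}

static void budget_dec_safe(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGALRM);
	pthread_sigmask(SIG_BLOCK, &set, &old);    /* ~ spin_lock_bh() */
	pthread_mutex_lock(&lock);
	if (rx_budget)
		rx_budget--;
	pthread_mutex_unlock(&lock);
	pthread_sigmask(SIG_SETMASK, &old, NULL);  /* ~ spin_unlock_bh() */
}

int main(void)
{
	signal(SIGALRM, fake_softirq);
	budget_dec_safe();
	printf("budget=%d\n", rx_budget);
	return 0;
}

The analogy is imperfect (mutexes sleep, spinlocks spin, and locking in a signal handler is not async-signal-safe), but the rule it illustrates is the same: block the interrupting context before taking a lock it also uses.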
@@ -164,11 +164,14 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
 		goto done_unlock;

 	id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
-	if (id < 0)
+	if (id < 0) {
+		wwandev = ERR_PTR(id);
		goto done_unlock;
+	}

 	wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
 	if (!wwandev) {
+		wwandev = ERR_PTR(-ENOMEM);
		ida_free(&wwan_dev_ids, id);
		goto done_unlock;
	}

@@ -182,7 +185,8 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
 	err = device_register(&wwandev->dev);
 	if (err) {
		put_device(&wwandev->dev);
-		wwandev = NULL;
+		wwandev = ERR_PTR(err);
+		goto done_unlock;
	}

 done_unlock:

@@ -1014,8 +1018,8 @@ int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
 		return -EINVAL;

 	wwandev = wwan_create_dev(parent);
-	if (!wwandev)
-		return -ENOMEM;
+	if (IS_ERR(wwandev))
+		return PTR_ERR(wwandev);

 	if (WARN_ON(wwandev->ops)) {
		wwan_remove_dev(wwandev);

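wwan_create_dev() now reports why it failed instead of returning a bare NULL, and the caller propagates that code. The ERR_PTR()/IS_ERR()/PTR_ERR() trio encodes a small negative errno inside a pointer value no valid object can occupy, so one return value carries "object or error". A hedged userspace re-implementation of the idiom:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Negative errnos map into the top 4095 addresses, which no real
 * allocation can return, so the two value spaces never collide.
 */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_dev(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);  /* say *why*, not just NULL */
	return malloc(16);
}

int main(void)
{
	void *dev = create_dev(1);

	if (IS_ERR(dev)) {                /* caller forwards the real code */
		printf("create_dev failed: %ld\n", PTR_ERR(dev));
		return 1;
	}
	free(dev);
	return 0;
}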
@@ -154,7 +154,7 @@ static int unregister_vclock(struct device *dev, void *data)
 	struct ptp_clock *ptp = dev_get_drvdata(dev);
 	struct ptp_clock_info *info = ptp->info;
 	struct ptp_vclock *vclock;
-	u8 *num = data;
+	u32 *num = data;

 	vclock = info_to_vclock(info);
 	dev_info(dev->parent, "delete virtual clock ptp%d\n",

@@ -279,7 +279,7 @@ static void qeth_l2_set_pnso_mode(struct qeth_card *card,
 static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
 {
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};

 	QETH_CARD_TEXT(card, 2, "fdbflush");

@@ -679,7 +679,7 @@ static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
 				      struct net_if_token *token,
				      struct mac_addr_lnid *addr_lnid)
 {
-	struct switchdev_notifier_fdb_info info;
+	struct switchdev_notifier_fdb_info info = {};
 	u8 ntfy_mac[ETH_ALEN];

 	ether_addr_copy(ntfy_mac, addr_lnid->mac);

@@ -526,7 +526,6 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
 	void __iomem *uar_page = ndev->mvdev.res.uar->map;
 	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 	struct mlx5_vdpa_cq *vcq = &mvq->cq;
-	unsigned int irqn;
 	__be64 *pas;
 	int inlen;
 	void *cqc;

@@ -566,7 +565,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
 	/* Use vector 0 by default. Consider adding code to choose least used
	 * vector.
	 */
-	err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
+	err = mlx5_vector2eqn(mdev, 0, &eqn);
 	if (err)
		goto err_vec;

@@ -201,8 +201,8 @@ static inline void bpf_cgroup_storage_unset(void)
 {
 	int i;

-	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);

@@ -41,7 +41,7 @@ struct in_device {
 	unsigned long		mr_qri;		/* Query Response Interval */
 	unsigned char		mr_qrv;		/* Query Robustness Variable */
 	unsigned char		mr_gq_running;
-	unsigned char		mr_ifc_count;
+	u32			mr_ifc_count;
 	struct timer_list	mr_gq_timer;	/* general query timer */
 	struct timer_list	mr_ifc_timer;	/* interface change timer */

@@ -1044,8 +1044,7 @@ void mlx5_unregister_debugfs(void);
 void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-		    unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

@@ -196,6 +196,9 @@ struct ip_set_region {
 	u32 elements;		/* Number of elements vs timeout */
 };

+/* Max range where every element is added/deleted in one step */
+#define IPSET_MAX_RANGE		(1<<20)
+
 /* The max revision number supported by any set type + 1 */
 #define IPSET_REVISION_MAX	9

@@ -7,7 +7,7 @@
 bool __do_once_start(bool *done, unsigned long *flags);
 void __do_once_done(bool *done, struct static_key_true *once_key,
-		    unsigned long *flags);
+		    unsigned long *flags, struct module *mod);

 /* Call a function exactly once. The idea of DO_ONCE() is to perform
  * a function call such as initialization of random seeds, etc, only

@@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
 			if (unlikely(___ret)) {			\
				func(__VA_ARGS__);		\
				__do_once_done(&___done, &___once_key,	\
-					       &___flags);	\
+					       &___flags, THIS_MODULE);	\
			}					\
		}						\
		___ret;						\

@@ -120,10 +120,11 @@ enum lockdown_reason {
 	LOCKDOWN_MMIOTRACE,
 	LOCKDOWN_DEBUGFS,
 	LOCKDOWN_XMON_WR,
+	LOCKDOWN_BPF_WRITE_USER,
 	LOCKDOWN_INTEGRITY_MAX,
 	LOCKDOWN_KCORE,
 	LOCKDOWN_KPROBES,
-	LOCKDOWN_BPF_READ,
+	LOCKDOWN_BPF_READ_KERNEL,
 	LOCKDOWN_PERF,
 	LOCKDOWN_TRACEFS,
 	LOCKDOWN_XMON_RW,

@@ -30,7 +30,6 @@ struct nf_tcp_net {
 	u8 tcp_ignore_invalid_rst;
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
	unsigned int offload_timeout;
-	unsigned int offload_pickup;
 #endif
 };

@@ -44,7 +43,6 @@ struct nf_udp_net {
 	unsigned int timeouts[UDP_CT_MAX];
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
	unsigned int offload_timeout;
-	unsigned int offload_pickup;
 #endif
 };

@@ -31,6 +31,8 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num);
 void psample_group_take(struct psample_group *group);
 void psample_group_put(struct psample_group *group);

+struct sk_buff;
+
 #if IS_ENABLED(CONFIG_PSAMPLE)

 void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,

@@ -66,8 +66,11 @@ enum {
 #define NUD_NONE	0x00

 /* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
-   and make no address resolution or NUD.
-   NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * and make no address resolution or NUD.
+ * NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
+ * states don't make sense and thus are ignored. Such entries don't age and
+ * can roam.
 */

 struct nda_cacheinfo {

@@ -43,6 +43,15 @@ enum nfnl_hook_chain_info_attributes {
 };
 #define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1)

+enum nfnl_hook_chain_desc_attributes {
+	NFNLA_CHAIN_UNSPEC,
+	NFNLA_CHAIN_TABLE,
+	NFNLA_CHAIN_FAMILY,
+	NFNLA_CHAIN_NAME,
+	__NFNLA_CHAIN_MAX,
+};
+#define NFNLA_CHAIN_MAX (__NFNLA_CHAIN_MAX - 1)
+
 /**
  * enum nfnl_hook_chaintype - chain type
  *

@@ -1362,11 +1362,13 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
 }

 /**
- *	__bpf_prog_run - run eBPF program on a given context
+ *	___bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *
 *	Decode and execute eBPF instructions.
+ *
+ *	Return: whatever value is in %BPF_R0 at program exit
 */
 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 {

@@ -1878,6 +1880,9 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
+ *
+ * Return: the &fp argument along with &err set to 0 for success or
+ * a negative errno code on failure
 */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {

@@ -1565,8 +1565,8 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 	/* We cannot do copy_from_user or copy_to_user inside
	 * the rcu_read_lock. Allocate enough space here.
	 */
-	keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
-	values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
+	keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
+	values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
 	if (!keys || !values) {
		ret = -ENOMEM;
		goto after_loop;

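The move from kvmalloc(a * b) to kvmalloc_array(a, b) matters because the product of two u32-derived sizes can wrap, yielding a short allocation that later writes overrun. A hedged sketch of the check array-allocation helpers of this kind perform:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Overflow-checked array allocation, in the spirit of kvmalloc_array():
 * refuse the request outright when n * size would wrap around SIZE_MAX.
 */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;            /* would overflow: fail, don't wrap */
	return malloc(n * size);
}

int main(void)
{
	/* A pathological bucket_size-style multiplier: plain malloc(n * size)
	 * would wrap to a tiny allocation; the checked helper refuses.
	 */
	void *p = alloc_array(SIZE_MAX / 2 + 1, 4);

	printf("%s\n", p ? "allocated" : "rejected oversized request");
	free(p);
	return 0;
}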
@@ -397,8 +397,8 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
 	void *ptr;
 	int i;

-	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);

@@ -1070,12 +1070,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
 	case BPF_FUNC_probe_read_kernel:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
 	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
 	case BPF_FUNC_probe_read_kernel_str:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
 	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;

@@ -990,28 +990,29 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_numa_node_id_proto;
 	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
-	case BPF_FUNC_probe_write_user:
-		return bpf_get_probe_write_proto();
 	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
 	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
+	case BPF_FUNC_probe_write_user:
+		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
+		       NULL : bpf_get_probe_write_proto();
 	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
 	case BPF_FUNC_probe_read_kernel:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
 	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
 	case BPF_FUNC_probe_read_kernel_str:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	case BPF_FUNC_probe_read:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
 	case BPF_FUNC_probe_read_str:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
 #endif
 #ifdef CONFIG_CGROUPS

lib/once.c:
@@ -3,10 +3,12 @@
 #include <linux/spinlock.h>
 #include <linux/once.h>
 #include <linux/random.h>
+#include <linux/module.h>

 struct once_work {
	struct work_struct work;
	struct static_key_true *key;
+	struct module *module;
 };

 static void once_deferred(struct work_struct *w)

@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
 	work = container_of(w, struct once_work, work);
 	BUG_ON(!static_key_enabled(work->key));
 	static_branch_disable(work->key);
+	module_put(work->module);
 	kfree(work);
 }

-static void once_disable_jump(struct static_key_true *key)
+static void once_disable_jump(struct static_key_true *key, struct module *mod)
 {
 	struct once_work *w;

@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key, struct module *mod)
 	INIT_WORK(&w->work, once_deferred);
 	w->key = key;
+	w->module = mod;
+	__module_get(mod);
 	schedule_work(&w->work);
 }

@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
 EXPORT_SYMBOL(__do_once_start);

 void __do_once_done(bool *done, struct static_key_true *once_key,
-		    unsigned long *flags)
+		    unsigned long *flags, struct module *mod)
	__releases(once_lock)
 {
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
-	once_disable_jump(once_key);
+	once_disable_jump(once_key, mod);
 }
 EXPORT_SYMBOL(__do_once_done);

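The lib/once.c change pins the calling module before the deferred work is queued, so the static key the work will flip is guaranteed to still be mapped when the work runs; module_put() in the handler releases the pin. A brief hedged sketch of the pin-then-schedule shape, with stand-in get/put helpers (hypothetical, not the real module API):

#include <stdio.h>

/* Stand-ins for __module_get()/module_put(): the owner must stay alive
 * until every piece of scheduled deferred work has dropped its pin.
 */
struct module { int refcnt; };

static void module_get(struct module *m) { m->refcnt++; }
static void module_put(struct module *m) { m->refcnt--; }

struct once_work {
	struct module *module;      /* pinned owner of the static key */
};

static void once_deferred(struct once_work *w)
{
	/* ... flip the branch that lives inside w->module ... */
	module_put(w->module);      /* unload is safe only from here on */
}

static void once_disable_jump(struct module *mod, struct once_work *w)
{
	w->module = mod;
	module_get(mod);            /* pin *before* the work is queued */
	/* schedule_work(&w->work) would run once_deferred() later */
}

int main(void)
{
	struct module mod = { .refcnt = 1 };
	struct once_work w;

	once_disable_jump(&mod, &w);
	printf("pinned: refcnt=%d\n", mod.refcnt);   /* 2: unload blocked */
	once_deferred(&w);
	printf("released: refcnt=%d\n", mod.refcnt); /* back to 1 */
	return 0;
}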
@@ -7,6 +7,7 @@
 #include <linux/vmalloc.h>
 #include <linux/etherdevice.h>
 #include <linux/filter.h>
+#include <linux/rcupdate_trace.h>
 #include <linux/sched/signal.h>
 #include <net/bpf_sk_storage.h>
 #include <net/sock.h>

@@ -951,7 +952,10 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
 			goto out;
		}
 	}

+	rcu_read_lock_trace();
 	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
+	rcu_read_unlock_trace();

 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;

@@ -166,8 +166,7 @@ static int br_switchdev_event(struct notifier_block *unused,
 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
		fdb_info = ptr;
		err = br_fdb_external_learn_add(br, p, fdb_info->addr,
-						fdb_info->vid,
-						fdb_info->is_local, false);
+						fdb_info->vid, false);
		if (err) {
			err = notifier_from_errno(err);
			break;

@@ -1044,10 +1044,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
 					       "FDB entry towards bridge must be permanent");
			return -EINVAL;
		}
-
-		err = br_fdb_external_learn_add(br, p, addr, vid,
-						ndm->ndm_state & NUD_PERMANENT,
-						true);
+		err = br_fdb_external_learn_add(br, p, addr, vid, true);
 	} else {
		spin_lock_bh(&br->hash_lock);
		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);

@@ -1275,7 +1272,7 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
 }

 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
-			      const unsigned char *addr, u16 vid, bool is_local,
+			      const unsigned char *addr, u16 vid,
			      bool swdev_notify)
 {
 	struct net_bridge_fdb_entry *fdb;

@@ -1293,7 +1290,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 	if (swdev_notify)
		flags |= BIT(BR_FDB_ADDED_BY_USER);

-	if (is_local)
+	if (!p)
		flags |= BIT(BR_FDB_LOCAL);

 	fdb = fdb_create(br, p, addr, vid, flags);

@@ -1322,7 +1319,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 		if (swdev_notify)
			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

-		if (is_local)
+		if (!p)
			set_bit(BR_FDB_LOCAL, &fdb->flags);

		if (modified)

@@ -616,6 +616,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
 	err = dev_set_allmulti(dev, 1);
 	if (err) {
+		br_multicast_del_port(p);
		kfree(p);	/* kobject not yet init'd, manually free */
		goto err1;
 	}

@@ -729,6 +730,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
 err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
+	br_multicast_del_port(p);
	kobject_put(&p->kobj);
	dev_set_allmulti(dev, -1);
 err1:

@@ -711,7 +711,7 @@ int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
-			      const unsigned char *addr, u16 vid, bool is_local,
+			      const unsigned char *addr, u16 vid,
			      bool swdev_notify);
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid,

@@ -88,6 +88,12 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 			skb = ip_fraglist_next(&iter);
		}

+		if (!err)
+			return 0;
+
+		kfree_skb_list(iter.frag);
+
		return err;
	}
 slow_path:

@@ -158,7 +158,7 @@ static void linkwatch_do_dev(struct net_device *dev)
 	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

 	rfc2863_policy(dev);
-	if (dev->flags & IFF_UP && netif_device_present(dev)) {
+	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else

@@ -204,7 +204,8 @@ static void __linkwatch_run_queue(int urgent_only)
 		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

-		if (urgent_only && !linkwatch_urgent_event(dev)) {
+		if (!netif_device_present(dev) ||
+		    (urgent_only && !linkwatch_urgent_event(dev))) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}

@@ -634,7 +634,15 @@ bool page_pool_return_skb_page(struct page *page)
 	struct page_pool *pp;

 	page = compound_head(page);
-	if (unlikely(page->pp_magic != PP_SIGNATURE))
+
+	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
+	 * in order to preserve any existing bits, such as bit 0 for the
+	 * head page of compound page and bit 1 for pfmemalloc page, so
+	 * mask those bits for freeing side when doing below checking,
+	 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
+	 * to avoid recycling the pfmemalloc page.
+	 */
+	if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
		return false;

 	pp = page->pp;

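The new comment spells out the trick: PP_SIGNATURE is OR'ed into page->pp_magic, so the low bits may legitimately carry unrelated page state and must be masked off before the comparison, or a genuine page-pool page is misjudged and its DMA mapping leaks. A hedged arithmetic sketch with an illustrative signature value (not the kernel's actual PP_SIGNATURE):

#include <stdio.h>

#define PP_SIGNATURE  0x40UL   /* illustrative value only */
#define LOW_BITS_MASK 0x3UL    /* bits 0/1: unrelated page state */

int main(void)
{
	/* Allocation side ORs the signature over whatever low bits exist. */
	unsigned long pp_magic = PP_SIGNATURE | 0x1UL;

	/* Buggy check: a stray low bit makes a pool page look foreign. */
	printf("unmasked match: %d\n", pp_magic == PP_SIGNATURE);  /* 0 */

	/* Fixed check: ignore the low bits, compare the signature alone. */
	printf("masked match:   %d\n",
	       (pp_magic & ~LOW_BITS_MASK) == PP_SIGNATURE);       /* 1 */
	return 0;
}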
@@ -41,9 +41,9 @@ extern bool dccp_debug;
 #define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
 #define dccp_debug(fmt, a...)		  dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
 #else
-#define dccp_pr_debug(format, a...)
-#define dccp_pr_debug_cat(format, a...)
-#define dccp_debug(format, a...)
+#define dccp_pr_debug(format, a...)	  do {} while (0)
+#define dccp_pr_debug_cat(format, a...)	  do {} while (0)
+#define dccp_debug(format, a...)	  do {} while (0)
 #endif

 extern struct inet_hashinfo dccp_hashinfo;

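Replacing the empty debug stubs with do {} while (0) is the standard macro guard: the expansion becomes a single real statement, which keeps if/else bodies well-formed and, the usual motivation, silences compilers that warn about an empty body when the macro's trailing semicolon is all that remains. A small demonstration:

#include <stdio.h>

#define dbg_empty(fmt)                 /* expands to nothing: body is ";" */
#define dbg_safe(fmt) do {} while (0)  /* expands to one real statement */

int main(void)
{
	int on = 0;

	if (on)
		dbg_empty("x");  /* body is just ";": -Wempty-body warns */

	if (on)
		dbg_safe("x");   /* a genuine (empty) statement: no warning */
	else
		printf("if/else stays well-formed around the stub\n");

	return 0;
}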
@@ -2291,8 +2291,8 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
 static void
 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
 {
+	struct switchdev_notifier_fdb_info info = {};
 	struct dsa_switch *ds = switchdev_work->ds;
-	struct switchdev_notifier_fdb_info info;
 	struct dsa_port *dp;

 	if (!dsa_is_user_port(ds, switchdev_work->port))

@@ -984,6 +984,11 @@ static const struct proto_ops ieee802154_dgram_ops = {
 	.sendpage	   = sock_no_sendpage,
 };

+static void ieee802154_sock_destruct(struct sock *sk)
+{
+	skb_queue_purge(&sk->sk_receive_queue);
+}
+
 /* Create a socket. Initialise the socket, blank the addresses
 * set the state.
 */

@@ -1024,7 +1029,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
 	sock->ops = ops;

 	sock_init_data(sock, sk);
-	/* FIXME: sk->sk_destruct */
+	sk->sk_destruct = ieee802154_sock_destruct;
 	sk->sk_family = PF_IEEE802154;

 	/* Checksums on by default */

@@ -803,10 +803,17 @@ static void igmp_gq_timer_expire(struct timer_list *t)
 static void igmp_ifc_timer_expire(struct timer_list *t)
 {
 	struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
+	u32 mr_ifc_count;

 	igmpv3_send_cr(in_dev);
-	if (in_dev->mr_ifc_count) {
-		in_dev->mr_ifc_count--;
+restart:
+	mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);
+
+	if (mr_ifc_count) {
+		if (cmpxchg(&in_dev->mr_ifc_count,
+			    mr_ifc_count,
+			    mr_ifc_count - 1) != mr_ifc_count)
+			goto restart;
		igmp_ifc_start_timer(in_dev,
				     unsolicited_report_interval(in_dev));
	}

@@ -818,7 +825,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
 	struct net *net = dev_net(in_dev->dev);
 	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		return;
-	in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+	WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
 	igmp_ifc_start_timer(in_dev, 1);
 }

@@ -957,7 +964,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 			in_dev->mr_qri;
	}
 	/* cancel the interface change timer */
-	in_dev->mr_ifc_count = 0;
+	WRITE_ONCE(in_dev->mr_ifc_count, 0);
 	if (del_timer(&in_dev->mr_ifc_timer))
		__in_dev_put(in_dev);
 	/* clear deleted report items */

@@ -1724,7 +1731,7 @@ void ip_mc_down(struct in_device *in_dev)
 		igmp_group_dropped(pmc);

 #ifdef CONFIG_IP_MULTICAST
-	in_dev->mr_ifc_count = 0;
+	WRITE_ONCE(in_dev->mr_ifc_count, 0);
 	if (del_timer(&in_dev->mr_ifc_timer))
		__in_dev_put(in_dev);
 	in_dev->mr_gq_running = 0;

@@ -1941,7 +1948,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 	pmc->sfmode = MCAST_INCLUDE;
 #ifdef CONFIG_IP_MULTICAST
	pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-	in_dev->mr_ifc_count = pmc->crcount;
+	WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
	for (psf = pmc->sources; psf; psf = psf->sf_next)
		psf->sf_crcount = 0;
	igmp_ifc_event(pmc->interface);

@@ -2120,7 +2127,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 	/* else no filters; keep old mode for reports */

	pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-	in_dev->mr_ifc_count = pmc->crcount;
+	WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
	for (psf = pmc->sources; psf; psf = psf->sf_next)
		psf->sf_crcount = 0;
	igmp_ifc_event(in_dev);

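The igmp timer fix replaces a plain decrement, racy against writers on other CPUs, with a READ_ONCE plus cmpxchg loop: read the counter, attempt an atomic compare-and-swap to the decremented value, and retry from the top if another CPU changed it in between. A hedged userspace rendering of the same loop using GCC's __atomic builtins:

#include <stdio.h>

static unsigned int mr_ifc_count = 3;

/* Decrement only if non-zero, tolerating concurrent writers: the same
 * read / compare-and-swap / retry shape the igmp fix builds on cmpxchg().
 */
static void dec_if_nonzero(unsigned int *ctr)
{
	unsigned int cur = __atomic_load_n(ctr, __ATOMIC_RELAXED);

	while (cur) {
		/* On failure, cur is refreshed with the value that won,
		 * which replays the kernel's "goto restart" reread.
		 */
		if (__atomic_compare_exchange_n(ctr, &cur, cur - 1, 0,
						__ATOMIC_RELAXED,
						__ATOMIC_RELAXED))
			break;
	}
}

int main(void)
{
	dec_if_nonzero(&mr_ifc_count);
	printf("count=%u\n", mr_ifc_count);  /* 2 */
	return 0;
}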
@@ -1041,7 +1041,7 @@ static void bbr_init(struct sock *sk)
 	bbr->prior_cwnd = 0;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	bbr->rtt_cnt = 0;
-	bbr->next_rtt_delivered = 0;
+	bbr->next_rtt_delivered = tp->delivered;
 	bbr->prev_ca_state = TCP_CA_Open;
 	bbr->packet_conservation = 0;

@@ -132,8 +132,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
-		if (ip > ip_to)
+		if (ip > ip_to) {
+			if (ip_to == 0)
+				return -IPSET_ERR_HASH_ELEM;
			swap(ip, ip_to);
+		}
 	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

@@ -144,6 +147,10 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 	hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);

+	/* 64bit division is not allowed on 32bit */
+	if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried) {
		ip = ntohl(h->next.ip);
		e.ip = htonl(ip);

@@ -121,6 +121,8 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 	e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
 	e.mark &= h->markmask;
+	if (e.mark == 0 && e.ip == 0)
+		return -IPSET_ERR_HASH_ELEM;

 	if (adt == IPSET_TEST ||
	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {

@@ -133,8 +135,11 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
-		if (ip > ip_to)
+		if (ip > ip_to) {
+			if (e.mark == 0 && ip_to == 0)
+				return -IPSET_ERR_HASH_ELEM;
			swap(ip, ip_to);
+		}
 	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

@@ -143,6 +148,9 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip, ip_to, cidr);
 	}

+	if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried)
		ip = ntohl(h->next.ip);
 	for (; ip <= ip_to; ip++) {

@@ -173,6 +173,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
 		swap(port, port_to);
 	}

+	if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried)
		ip = ntohl(h->next.ip);
 	for (; ip <= ip_to; ip++) {

@@ -180,6 +180,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
 		swap(port, port_to);
 	}

+	if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried)
		ip = ntohl(h->next.ip);
 	for (; ip <= ip_to; ip++) {

@@ -253,6 +253,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 		swap(port, port_to);
 	}

+	if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	ip2_to = ip2_from;
 	if (tb[IPSET_ATTR_IP2_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);

@@ -140,7 +140,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem e = { .cidr = HOST_MASK };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0;
+	u32 ip = 0, ip_to = 0, ipn, n = 0;
 	int ret;

 	if (tb[IPSET_ATTR_LINENO])

@@ -188,6 +188,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 		if (ip + UINT_MAX == ip_to)
			return -IPSET_ERR_HASH_RANGE;
 	}
+	ipn = ip;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+		n++;
+	} while (ipn++ < ip_to);
+
+	if (n > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried)
		ip = ntohl(h->next.ip);
 	do {

@@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0;
+	u32 ip = 0, ip_to = 0, ipn, n = 0;
 	int ret;

 	if (tb[IPSET_ATTR_LINENO])

@@ -256,6 +256,14 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 	} else {
		ip_set_mask_from_to(ip, ip_to, e.cidr);
 	}
+	ipn = ip;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+		n++;
+	} while (ipn++ < ip_to);
+
+	if (n > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried)
		ip = ntohl(h->next.ip);

@@ -168,7 +168,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	struct hash_netnet4_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 ip = 0, ip_to = 0;
-	u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
+	u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn;
+	u64 n = 0, m = 0;
 	int ret;

 	if (tb[IPSET_ATTR_LINENO])

@@ -244,6 +245,19 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	} else {
		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
 	}
+	ipn = ip;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+		n++;
+	} while (ipn++ < ip_to);
+	ipn = ip2_from;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+		m++;
+	} while (ipn++ < ip2_to);
+
+	if (n*m > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried) {
		ip = ntohl(h->next.ip[0]);

@@ -158,7 +158,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 port, port_to, p = 0, ip = 0, ip_to = 0;
+	u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn;
+	u64 n = 0;
 	bool with_ports = false;
 	u8 cidr;
 	int ret;

@@ -235,6 +236,14 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 	} else {
		ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
 	}
+	ipn = ip;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr);
+		n++;
+	} while (ipn++ < ip_to);
+
+	if (n*(port_to - port + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried) {
		ip = ntohl(h->next.ip);

@@ -182,7 +182,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	struct hash_netportnet4_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
-	u32 ip2_from = 0, ip2_to = 0, ip2;
+	u32 ip2_from = 0, ip2_to = 0, ip2, ipn;
+	u64 n = 0, m = 0;
 	bool with_ports = false;
 	int ret;

@@ -284,6 +285,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	} else {
		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
 	}
+	ipn = ip;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+		n++;
+	} while (ipn++ < ip_to);
+	ipn = ip2_from;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+		m++;
+	} while (ipn++ < ip2_to);
+
+	if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried) {
		ip = ntohl(h->next.ip[0]);

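All the ipset range guards above share one trick: cast the first operand to u64 before the subtraction and multiplication, so the element count is computed in 64 bits where a u32 product could silently wrap past IPSET_MAX_RANGE and let a pathological range through. A small demonstration of why the cast must come first:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define IPSET_MAX_RANGE (1 << 20)

int main(void)
{
	uint32_t ip = 0, ip_to = UINT32_MAX, port = 1, port_to = 65535;

	/* 32-bit arithmetic: (ip_to - ip + 1) wraps to 0, the product is 0,
	 * and a bogus "tiny" range sails past any limit check.
	 */
	uint32_t bad = (ip_to - ip + 1) * (port_to - port + 1);

	/* 64-bit arithmetic: the cast happens before the math, so the true
	 * element count survives and the range is rejected with -ERANGE.
	 */
	uint64_t good = ((uint64_t)ip_to - ip + 1) * (port_to - port + 1);

	printf("wrapped count: %u\n", bad);                     /* 0 */
	printf("true count:    %" PRIu64 " > %d -> -ERANGE\n",
	       good, IPSET_MAX_RANGE);
	return 0;
}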
@@ -66,22 +66,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
 struct conntrack_gc_work {
	struct delayed_work	dwork;
-	u32			last_bucket;
+	u32			next_bucket;
	bool			exiting;
	bool			early_drop;
-	long			next_gc_run;
 };

 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;

-/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV	128u
-/* upper bound of full table scan */
-#define GC_MAX_SCAN_JIFFIES	(16u * HZ)
-/* desired ratio of entries found to be expired */
-#define GC_EVICT_RATIO	50u
+#define GC_SCAN_INTERVAL	(120u * HZ)
+#define GC_SCAN_MAX_DURATION	msecs_to_jiffies(10)

 static struct conntrack_gc_work conntrack_gc_work;

@@ -1363,17 +1358,13 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
 static void gc_worker(struct work_struct *work)
 {
-	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
-	unsigned int i, goal, buckets = 0, expired_count = 0;
-	unsigned int nf_conntrack_max95 = 0;
+	unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
+	unsigned int i, hashsz, nf_conntrack_max95 = 0;
+	unsigned long next_run = GC_SCAN_INTERVAL;
	struct conntrack_gc_work *gc_work;
-	unsigned int ratio, scanned = 0;
-	unsigned long next_run;

 	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);

-	goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
-	i = gc_work->last_bucket;
+	i = gc_work->next_bucket;
 	if (gc_work->early_drop)
		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;

@@ -1381,15 +1372,15 @@ static void gc_worker(struct work_struct *work)
 		struct nf_conntrack_tuple_hash *h;
		struct hlist_nulls_head *ct_hash;
		struct hlist_nulls_node *n;
-		unsigned int hashsz;
		struct nf_conn *tmp;

-		i++;
		rcu_read_lock();

		nf_conntrack_get_ht(&ct_hash, &hashsz);
-		if (i >= hashsz)
-			i = 0;
+		if (i >= hashsz) {
+			rcu_read_unlock();
+			break;
+		}

		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
			struct nf_conntrack_net *cnet;

@@ -1397,7 +1388,6 @@ static void gc_worker(struct work_struct *work)
 			tmp = nf_ct_tuplehash_to_ctrack(h);

-			scanned++;
			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
				nf_ct_offload_timeout(tmp);
				continue;

@@ -1405,7 +1395,6 @@ static void gc_worker(struct work_struct *work)
 			if (nf_ct_is_expired(tmp)) {
				nf_ct_gc_expired(tmp);
-				expired_count++;
				continue;
			}

@@ -1438,7 +1427,14 @@ static void gc_worker(struct work_struct *work)
 		 */
		rcu_read_unlock();
		cond_resched();
-	} while (++buckets < goal);
+		i++;
+
+		if (time_after(jiffies, end_time) && i < hashsz) {
+			gc_work->next_bucket = i;
+			next_run = 0;
+			break;
+		}
+	} while (i < hashsz);

 	if (gc_work->exiting)
		return;

@@ -1449,40 +1445,17 @@ static void gc_worker(struct work_struct *work)
 	 *
	 * This worker is only here to reap expired entries when system went
	 * idle after a busy period.
-	 *
-	 * The heuristics below are supposed to balance conflicting goals:
-	 *
-	 * 1. Minimize time until we notice a stale entry
-	 * 2. Maximize scan intervals to not waste cycles
-	 *
-	 * Normally, expire ratio will be close to 0.
-	 *
-	 * As soon as a sizeable fraction of the entries have expired
-	 * increase scan frequency.
	 */
-	ratio = scanned ? expired_count * 100 / scanned : 0;
-	if (ratio > GC_EVICT_RATIO) {
-		gc_work->next_gc_run = min_interval;
-	} else {
-		unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
-
-		BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
-
-		gc_work->next_gc_run += min_interval;
-		if (gc_work->next_gc_run > max)
-			gc_work->next_gc_run = max;
+	if (next_run) {
+		gc_work->early_drop = false;
+		gc_work->next_bucket = 0;
	}

-	next_run = gc_work->next_gc_run;
-	gc_work->last_bucket = i;
-	gc_work->early_drop = false;
 	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
 }

 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
	INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
-	gc_work->next_gc_run = HZ;
	gc_work->exiting = false;
 }

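The rewritten gc_worker walks the whole conntrack table in one logical cycle but caps each work-queue slice at GC_SCAN_MAX_DURATION, saving its position in next_bucket and rescheduling with no delay (next_run = 0) whenever the time budget runs out; only a completed cycle sleeps for the long GC_SCAN_INTERVAL. A hedged userspace sketch of that resumable, time-budgeted scan, with illustrative sizes:

#include <stdio.h>
#include <time.h>

#define HASHSZ   1000000
#define SLICE_NS 10000000L   /* ~GC_SCAN_MAX_DURATION: 10ms per slice */

static unsigned int next_bucket;   /* resume point across invocations */

static long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000L + ts.tv_nsec;
}

/* Returns 1 if the scan must resume soon, 0 when the table is done. */
static int gc_slice(void)
{
	long end_time = now_ns() + SLICE_NS;
	unsigned int i = next_bucket;

	while (i < HASHSZ) {
		/* ... scan bucket i, evict expired entries ... */
		i++;
		if (now_ns() > end_time && i < HASHSZ) {
			next_bucket = i;   /* remember where we stopped */
			return 1;          /* reschedule with no delay */
		}
	}
	next_bucket = 0;                   /* full cycle done: long sleep */
	return 0;
}

int main(void)
{
	int slices = 0;

	while (gc_slice())
		slices++;
	printf("finished in %d resumed slices\n", slices + 1);
	return 0;
}

Collecting everything in one bounded cycle is what lets the worker sleep for minutes on idle systems instead of waking frequently, which is exactly the behavior the merge summary describes.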