Merge tag 'net-5.14-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Networking fixes for 5.14-rc4, including fixes from bpf, can, WiFi
  (mac80211) and netfilter trees.

  Current release - regressions:

   - mac80211: fix starting aggregation sessions on mesh interfaces

  Current release - new code bugs:

   - sctp: send pmtu probe only if packet loss in Search Complete state

   - bnxt_en: add missing periodic PHC overflow check

   - devlink: fix phys_port_name of virtual port and merge error

   - hns3: change the method of obtaining default ptp cycle

   - can: mcba_usb_start(): add missing urb->transfer_dma initialization

  Previous releases - regressions:

   - set true network header for ECN decapsulation

   - mlx5e: RX, avoid possible data corruption w/ relaxed ordering and LRO

   - phy: re-add check for PHY_BRCM_DIS_TXCRXC_NOENRGY on the BCM54811 PHY

   - sctp: fix return value check in __sctp_rcv_asconf_lookup

  Previous releases - always broken:

   - bpf:
      - more spectre corner case fixes, introduce a BPF nospec
        instruction for mitigating Spectre v4
      - fix OOB read when printing XDP link fdinfo
      - sockmap: fix cleanup related races

   - mac80211: fix enabling 4-address mode on a sta vif after assoc

   - can:
      - raw: raw_setsockopt(): fix raw_rcv panic for sock UAF
      - j1939: j1939_session_deactivate(): clarify lifetime of session
        object, avoid UAF
      - fix number of identical memory leaks in USB drivers

   - tipc:
      - do not blindly write skb_shinfo frags when doing decryption
      - fix sleeping in tipc accept routine"

* tag 'net-5.14-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (91 commits)
  gve: Update MAINTAINERS list
  can: esd_usb2: fix memory leak
  can: ems_usb: fix memory leak
  can: usb_8dev: fix memory leak
  can: mcba_usb_start(): add missing urb->transfer_dma initialization
  can: hi311x: fix a signedness bug in hi3110_cmd()
  MAINTAINERS: add Yasushi SHOJI as reviewer for the Microchip CAN BUS Analyzer Tool driver
  bpf: Fix leakage due to insufficient speculative store bypass mitigation
  bpf: Introduce BPF nospec instruction for mitigating Spectre v4
  sis900: Fix missing pci_disable_device() in probe and remove
  net: let flow have same hash in two directions
  nfc: nfcsim: fix use after free during module unload
  tulip: windbond-840: Fix missing pci_disable_device() in probe and remove
  sctp: fix return value check in __sctp_rcv_asconf_lookup
  nfc: s3fwrn5: fix undefined parameter values in dev_err()
  net/mlx5: Fix mlx5_vport_tbl_attr chain from u16 to u32
  net/mlx5e: Fix nullptr in mlx5e_hairpin_get_mdev()
  net/mlx5: Unload device upon firmware fatal error
  net/mlx5e: Fix page allocation failure for ptp-RQ over SF
  net/mlx5e: Fix page allocation failure for trap-RQ over SF
  ...

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmEEWm8ACgkQMUZtbf5S
Irv84A//V/nn9VRdpDpmodwBWVEc9SA00M/nmziRBLwRyG+fRMtnePY4Ha40TPbh
LL6orth08hZKOjVmMc6Ea4EjZbV5E3iAKtAnaX6wi1HpEXVxKtFYnWxu9ydwTEd9
An1fltDtWYkNi3kiq7il+Tp1/yZAQ+NYv5zQZCWJ47kkN3jkjULdAEBqODA2A6Ul
0PQgS1rKzXukE19PlXDuaNuEekhTiEfaTwzHjdBJZkj1toGJGfHsvdQ/YJjixzB9
44SjE4PfxIaMWP0BVaD6hwzaVQhaZETXhZZufdIDdQd7sDbmd6CPODX6mXfLEq4u
JaWylgobsK+5ScHE6siVI+ZlW7stq9l1Ynm10ADiwsZVzKEoP745484aEFOLO6Z+
Ln/IqDQCP/yJQmnl2i0+TfqVDh6BKYoIfUUK/+nzHw4Otycy0m3kj4P+74aYfjOv
Q+cUgbXUemcrpq6wGUK+zK0NyNHVILvdPDnHPMMypwqPk18y5ZmFvaJAVUPSavD9
N7t9LoLyGwK3i/Ir4l+JJZ1KgAv1+TbmyNBWvY1Yk/r/vHU3nBPIv26s7YarNAwD
094vJEJ0+mqO4h+Xj1Nc7HEBFi46JfpN2L8uYoM7gpwziIRMdmpXVLmpEk43WmFi
UMwWJWqabPEXaozC2UFcFLSk+jS7DiD+G5eG+Fd5HecmKzd7RI0=
=sKPI
-----END PGP SIGNATURE-----
commit c7d1022326
@@ -7858,9 +7858,9 @@ S: Maintained
F: drivers/input/touchscreen/goodix.c

GOOGLE ETHERNET DRIVERS
M: Catherine Sullivan <csully@google.com>
R: Sagi Shahar <sagis@google.com>
R: Jon Olson <jonolson@google.com>
M: Jeroen de Borst <jeroendb@google.com>
R: Catherine Sullivan <csully@google.com>
R: David Awogbemila <awogbemila@google.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/ethernet/google/gve.rst
@@ -11327,6 +11327,12 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/radio/radio-maxiradio*

MCAB MICROCHIP CAN BUS ANALYZER TOOL DRIVER
R: Yasushi SHOJI <yashi@spacecubics.com>
L: linux-can@vger.kernel.org
S: Maintained
F: drivers/net/can/usb/mcba_usb.c

MCAN MMIO DEVICE DRIVER
M: Chandrasekar Ramakrishnan <rcsekar@samsung.com>
L: linux-can@vger.kernel.org

@@ -1602,6 +1602,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:

@@ -579,7 +579,7 @@ uart2: serial@30890000 {
};

flexcan1: can@308c0000 {
compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
compatible = "fsl,imx8mp-flexcan";
reg = <0x308c0000 0x10000>;
interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
@@ -594,7 +594,7 @@ flexcan1: can@308c0000 {
};

flexcan2: can@308d0000 {
compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
compatible = "fsl,imx8mp-flexcan";
reg = <0x308d0000 0x10000>;
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_IPG_ROOT>,

@@ -823,6 +823,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
return ret;
break;

/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
/*
* Nothing required here.
*
* In case of arm64, we rely on the firmware mitigation of
* Speculative Store Bypass as controlled via the ssbd kernel
* parameter. Whenever the mitigation is enabled, it works
* for all of the kernel code with no need to provide any
* additional instructions.
*/
break;

/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:

@@ -1355,6 +1355,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
}
break;

case BPF_ST | BPF_NOSPEC: /* speculation barrier */
break;

case BPF_ST | BPF_B | BPF_MEM:
case BPF_ST | BPF_H | BPF_MEM:
case BPF_ST | BPF_W | BPF_MEM:

@@ -737,6 +737,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
}
break;

/*
* BPF_ST NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
break;

/*
* BPF_ST(X)
*/

@@ -627,6 +627,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
}
break;

/*
* BPF_ST NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
break;

/*
* BPF_ST(X)
*/

@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
return -1;
break;

/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;

case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:

@@ -939,6 +939,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_ld(rd, 0, RV_REG_T1, ctx);
break;

/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;

/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
emit_imm(RV_REG_T1, imm, ctx);

@@ -1153,6 +1153,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
break;
}
break;
/*
* BPF_NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
break;
/*
* BPF_ST(X)
*/

@@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
return 1;
break;
}
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:

@@ -1219,6 +1219,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
}
break;

/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
if (boot_cpu_has(X86_FEATURE_XMM2))
/* Emit 'lfence' */
EMIT3(0x0F, 0xAE, 0xE8);
break;

/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
if (is_ereg(dst_reg))

@@ -1886,6 +1886,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
i++;
break;
}
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
if (boot_cpu_has(X86_FEATURE_XMM2))
/* Emit 'lfence' */
EMIT3(0x0F, 0xAE, 0xE8);
break;
/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:

@@ -218,7 +218,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
return ret;
}

static u8 hi3110_cmd(struct spi_device *spi, u8 command)
static int hi3110_cmd(struct spi_device *spi, u8 command)
{
struct hi3110_priv *priv = spi_get_drvdata(spi);

@@ -2300,6 +2300,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
err, priv->regs_status.intf);
mcp251xfd_dump(priv);
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_timestamp_stop(priv);

return handled;
}

@@ -255,6 +255,8 @@ struct ems_usb {
unsigned int free_slots; /* remember number of available slots */

struct ems_cpc_msg active_params; /* active controller parameters */
void *rxbuf[MAX_RX_URBS];
dma_addr_t rxbuf_dma[MAX_RX_URBS];
};

static void ems_usb_read_interrupt_callback(struct urb *urb)
@@ -587,6 +589,7 @@ static int ems_usb_start(struct ems_usb *dev)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf = NULL;
dma_addr_t buf_dma;

/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -596,7 +599,7 @@ static int ems_usb_start(struct ems_usb *dev)
}

buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
&urb->transfer_dma);
&buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -604,6 +607,8 @@ static int ems_usb_start(struct ems_usb *dev)
break;
}

urb->transfer_dma = buf_dma;

usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
buf, RX_BUFFER_SIZE,
ems_usb_read_bulk_callback, dev);
@@ -619,6 +624,9 @@ static int ems_usb_start(struct ems_usb *dev)
break;
}

dev->rxbuf[i] = buf;
dev->rxbuf_dma[i] = buf_dma;

/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@@ -684,6 +692,10 @@ static void unlink_all_urbs(struct ems_usb *dev)

usb_kill_anchored_urbs(&dev->rx_submitted);

for (i = 0; i < MAX_RX_URBS; ++i)
usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
dev->rxbuf[i], dev->rxbuf_dma[i]);

usb_kill_anchored_urbs(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);

@@ -195,6 +195,8 @@ struct esd_usb2 {
int net_count;
u32 version;
int rxinitdone;
void *rxbuf[MAX_RX_URBS];
dma_addr_t rxbuf_dma[MAX_RX_URBS];
};

struct esd_usb2_net_priv {
@@ -545,6 +547,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf = NULL;
dma_addr_t buf_dma;

/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -554,7 +557,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
}

buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
&urb->transfer_dma);
&buf_dma);
if (!buf) {
dev_warn(dev->udev->dev.parent,
"No memory left for USB buffer\n");
@@ -562,6 +565,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
goto freeurb;
}

urb->transfer_dma = buf_dma;

usb_fill_bulk_urb(urb, dev->udev,
usb_rcvbulkpipe(dev->udev, 1),
buf, RX_BUFFER_SIZE,
@@ -574,8 +579,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
goto freeurb;
}

dev->rxbuf[i] = buf;
dev->rxbuf_dma[i] = buf_dma;

freeurb:
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
@@ -663,6 +672,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
int i, j;

usb_kill_anchored_urbs(&dev->rx_submitted);

for (i = 0; i < MAX_RX_URBS; ++i)
usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
dev->rxbuf[i], dev->rxbuf_dma[i]);

for (i = 0; i < dev->net_count; i++) {
priv = dev->nets[i];
if (priv) {

@@ -653,6 +653,8 @@ static int mcba_usb_start(struct mcba_priv *priv)
break;
}

urb->transfer_dma = buf_dma;

usb_fill_bulk_urb(urb, priv->udev,
usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
buf, MCBA_USB_RX_BUFF_SIZE,

@@ -117,7 +117,8 @@
#define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR)

/* identify bus event packets with rx/tx error counters */
#define PCAN_USB_ERR_CNT 0x80
#define PCAN_USB_ERR_CNT_DEC 0x00 /* counters are decreasing */
#define PCAN_USB_ERR_CNT_INC 0x80 /* counters are increasing */

/* private to PCAN-USB adapter */
struct pcan_usb {
@@ -608,11 +609,12 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)

/* acccording to the content of the packet */
switch (ir) {
case PCAN_USB_ERR_CNT:
case PCAN_USB_ERR_CNT_DEC:
case PCAN_USB_ERR_CNT_INC:

/* save rx/tx error counters from in the device context */
pdev->bec.rxerr = mc->ptr[0];
pdev->bec.txerr = mc->ptr[1];
pdev->bec.rxerr = mc->ptr[1];
pdev->bec.txerr = mc->ptr[2];
break;

default:

@@ -137,7 +137,8 @@ struct usb_8dev_priv {
u8 *cmd_msg_buffer;

struct mutex usb_8dev_cmd_lock;

void *rxbuf[MAX_RX_URBS];
dma_addr_t rxbuf_dma[MAX_RX_URBS];
};

/* tx frame */
@@ -733,6 +734,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf;
dma_addr_t buf_dma;

/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -742,7 +744,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
}

buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
&urb->transfer_dma);
&buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -750,6 +752,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
break;
}

urb->transfer_dma = buf_dma;

usb_fill_bulk_urb(urb, priv->udev,
usb_rcvbulkpipe(priv->udev,
USB_8DEV_ENDP_DATA_RX),
@@ -767,6 +771,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
break;
}

priv->rxbuf[i] = buf;
priv->rxbuf_dma[i] = buf_dma;

/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@@ -836,6 +843,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)

usb_kill_anchored_urbs(&priv->rx_submitted);

for (i = 0; i < MAX_RX_URBS; ++i)
usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
priv->rxbuf[i], priv->rxbuf_dma[i]);

usb_kill_anchored_urbs(&priv->tx_submitted);
atomic_set(&priv->active_tx_urbs, 0);

@@ -2155,7 +2155,7 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
int i, err;

if (!vid)
return -EOPNOTSUPP;
return 0;

err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)

@@ -12131,9 +12131,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
bnxt_dl_health_recovery_done(bp);

@@ -353,6 +353,12 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)

bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
if (time_after_eq(now, ptp->next_overflow_check)) {
spin_lock_bh(&ptp->ptp_lock);
timecounter_read(&ptp->tc);
spin_unlock_bh(&ptp->ptp_lock);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
return HZ;
}

@@ -423,6 +429,7 @@ int bnxt_ptp_init(struct bnxt *bp)
ptp->cc.shift = 0;
ptp->cc.mult = 1;

ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));

ptp->ptp_info = bnxt_ptp_caps;

@@ -32,6 +32,10 @@ struct bnxt_ptp_cfg {
u64 current_time;
u64 old_time;
unsigned long next_period;
unsigned long next_overflow_check;
/* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */
#define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ)

u16 tx_seqid;
struct bnxt *bp;
atomic_t tx_avail;

@ -357,7 +357,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
|
||||
void __iomem *ioaddr;
|
||||
|
||||
i = pci_enable_device(pdev);
|
||||
i = pcim_enable_device(pdev);
|
||||
if (i) return i;
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
@ -379,7 +379,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
|
||||
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
|
||||
if (!ioaddr)
|
||||
goto err_out_free_res;
|
||||
goto err_out_netdev;
|
||||
|
||||
for (i = 0; i < 3; i++)
|
||||
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
|
||||
|
@ -458,8 +458,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
|
||||
err_out_cleardev:
|
||||
pci_iounmap(pdev, ioaddr);
|
||||
err_out_free_res:
|
||||
pci_release_regions(pdev);
|
||||
err_out_netdev:
|
||||
free_netdev (dev);
|
||||
return -ENODEV;
|
||||
|
@ -1526,7 +1524,6 @@ static void w840_remove1(struct pci_dev *pdev)
|
|||
if (dev) {
|
||||
struct netdev_private *np = netdev_priv(dev);
|
||||
unregister_netdev(dev);
|
||||
pci_release_regions(pdev);
|
||||
pci_iounmap(pdev, np->base_addr);
|
||||
free_netdev(dev);
|
||||
}
|
||||
|
|
|
@ -5,9 +5,27 @@
|
|||
#include "hclge_main.h"
|
||||
#include "hnae3.h"
|
||||
|
||||
static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
|
||||
{
|
||||
struct hclge_ptp *ptp = hdev->ptp;
|
||||
|
||||
ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
|
||||
HCLGE_PTP_CYCLE_QUO_MASK;
|
||||
ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
|
||||
ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
|
||||
|
||||
if (ptp->cycle.den == 0) {
|
||||
dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
|
||||
{
|
||||
struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
|
||||
struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
|
||||
u64 adj_val, adj_base, diff;
|
||||
unsigned long flags;
|
||||
bool is_neg = false;
|
||||
|
@ -18,7 +36,7 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
|
|||
is_neg = true;
|
||||
}
|
||||
|
||||
adj_base = HCLGE_PTP_CYCLE_ADJ_BASE * HCLGE_PTP_CYCLE_ADJ_UNIT;
|
||||
adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
|
||||
adj_val = adj_base * ppb;
|
||||
diff = div_u64(adj_val, 1000000000ULL);
|
||||
|
||||
|
@ -29,16 +47,16 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
|
|||
|
||||
/* This clock cycle is defined by three part: quotient, numerator
|
||||
* and denominator. For example, 2.5ns, the quotient is 2,
|
||||
* denominator is fixed to HCLGE_PTP_CYCLE_ADJ_UNIT, and numerator
|
||||
* is 0.5 * HCLGE_PTP_CYCLE_ADJ_UNIT.
|
||||
* denominator is fixed to ptp->cycle.den, and numerator
|
||||
* is 0.5 * ptp->cycle.den.
|
||||
*/
|
||||
quo = div_u64_rem(adj_val, HCLGE_PTP_CYCLE_ADJ_UNIT, &numerator);
|
||||
quo = div_u64_rem(adj_val, cycle->den, &numerator);
|
||||
|
||||
spin_lock_irqsave(&hdev->ptp->lock, flags);
|
||||
writel(quo, hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
|
||||
writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
|
||||
hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
|
||||
writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
|
||||
writel(HCLGE_PTP_CYCLE_ADJ_UNIT,
|
||||
hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
|
||||
writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
|
||||
writel(HCLGE_PTP_CYCLE_ADJ_EN,
|
||||
hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
|
||||
spin_unlock_irqrestore(&hdev->ptp->lock, flags);
|
||||
|
@ -475,6 +493,10 @@ int hclge_ptp_init(struct hclge_dev *hdev)
|
|||
ret = hclge_ptp_create_clock(hdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = hclge_ptp_get_cycle(hdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_ptp_int_en(hdev, true);
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#define HCLGE_PTP_TIME_ADJ_REG 0x60
|
||||
#define HCLGE_PTP_TIME_ADJ_EN BIT(0)
|
||||
#define HCLGE_PTP_CYCLE_QUO_REG 0x64
|
||||
#define HCLGE_PTP_CYCLE_QUO_MASK GENMASK(7, 0)
|
||||
#define HCLGE_PTP_CYCLE_DEN_REG 0x68
|
||||
#define HCLGE_PTP_CYCLE_NUM_REG 0x6C
|
||||
#define HCLGE_PTP_CYCLE_CFG_REG 0x70
|
||||
|
@ -37,9 +38,7 @@
|
|||
#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78
|
||||
#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C
|
||||
|
||||
#define HCLGE_PTP_CYCLE_ADJ_BASE 2
|
||||
#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000
|
||||
#define HCLGE_PTP_CYCLE_ADJ_UNIT 100000000
|
||||
#define HCLGE_PTP_SEC_H_OFFSET 32u
|
||||
#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0)
|
||||
|
||||
|
@ -47,6 +46,12 @@
|
|||
#define HCLGE_PTP_FLAG_TX_EN 1
|
||||
#define HCLGE_PTP_FLAG_RX_EN 2
|
||||
|
||||
struct hclge_ptp_cycle {
|
||||
u32 quo;
|
||||
u32 numer;
|
||||
u32 den;
|
||||
};
|
||||
|
||||
struct hclge_ptp {
|
||||
struct hclge_dev *hdev;
|
||||
struct ptp_clock *clock;
|
||||
|
@ -58,6 +63,7 @@ struct hclge_ptp {
|
|||
spinlock_t lock; /* protects ptp registers */
|
||||
u32 ptp_cfg;
|
||||
u32 last_tx_seqid;
|
||||
struct hclge_ptp_cycle cycle;
|
||||
unsigned long tx_start;
|
||||
unsigned long tx_cnt;
|
||||
unsigned long tx_skipped;
|
||||
|
|
|
@ -980,7 +980,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
|
|||
default:
|
||||
/* if we got here and link is up something bad is afoot */
|
||||
netdev_info(netdev,
|
||||
"WARNING: Link is up but PHY type 0x%x is not recognized.\n",
|
||||
"WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
|
||||
hw_link_info->phy_type);
|
||||
}
|
||||
|
||||
|
@ -5294,6 +5294,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
|
|||
dev_warn(&pf->pdev->dev,
|
||||
"Device configuration forbids SW from starting the LLDP agent.\n");
|
||||
return -EINVAL;
|
||||
case I40E_AQ_RC_EAGAIN:
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Stop FW LLDP agent command is still being processed, please try again in a second.\n");
|
||||
return -EBUSY;
|
||||
default:
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Starting FW LLDP agent failed: error: %s, %s\n",
|
||||
|
|
|
@ -4454,11 +4454,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
|
|||
}
|
||||
|
||||
/**
|
||||
* i40e_vsi_control_tx - Start or stop a VSI's rings
|
||||
* i40e_vsi_enable_tx - Start a VSI's rings
|
||||
* @vsi: the VSI being configured
|
||||
* @enable: start or stop the rings
|
||||
**/
|
||||
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
|
||||
static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
|
||||
{
|
||||
struct i40e_pf *pf = vsi->back;
|
||||
int i, pf_q, ret = 0;
|
||||
|
@ -4467,7 +4466,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
|
|||
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
|
||||
ret = i40e_control_wait_tx_q(vsi->seid, pf,
|
||||
pf_q,
|
||||
false /*is xdp*/, enable);
|
||||
false /*is xdp*/, true);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
|
@ -4476,7 +4475,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
|
|||
|
||||
ret = i40e_control_wait_tx_q(vsi->seid, pf,
|
||||
pf_q + vsi->alloc_queue_pairs,
|
||||
true /*is xdp*/, enable);
|
||||
true /*is xdp*/, true);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
@ -4574,32 +4573,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
|
|||
}
|
||||
|
||||
/**
|
||||
* i40e_vsi_control_rx - Start or stop a VSI's rings
|
||||
* i40e_vsi_enable_rx - Start a VSI's rings
|
||||
* @vsi: the VSI being configured
|
||||
* @enable: start or stop the rings
|
||||
**/
|
||||
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
|
||||
static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
|
||||
{
|
||||
struct i40e_pf *pf = vsi->back;
|
||||
int i, pf_q, ret = 0;
|
||||
|
||||
pf_q = vsi->base_queue;
|
||||
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
|
||||
ret = i40e_control_wait_rx_q(pf, pf_q, enable);
|
||||
ret = i40e_control_wait_rx_q(pf, pf_q, true);
|
||||
if (ret) {
|
||||
dev_info(&pf->pdev->dev,
|
||||
"VSI seid %d Rx ring %d %sable timeout\n",
|
||||
vsi->seid, pf_q, (enable ? "en" : "dis"));
|
||||
"VSI seid %d Rx ring %d enable timeout\n",
|
||||
vsi->seid, pf_q);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Due to HW errata, on Rx disable only, the register can indicate done
|
||||
* before it really is. Needs 50ms to be sure
|
||||
*/
|
||||
if (!enable)
|
||||
mdelay(50);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -4612,29 +4604,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
|
|||
int ret = 0;
|
||||
|
||||
/* do rx first for enable and last for disable */
|
||||
ret = i40e_vsi_control_rx(vsi, true);
|
||||
ret = i40e_vsi_enable_rx(vsi);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = i40e_vsi_control_tx(vsi, true);
|
||||
ret = i40e_vsi_enable_tx(vsi);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define I40E_DISABLE_TX_GAP_MSEC 50
|
||||
|
||||
/**
|
||||
* i40e_vsi_stop_rings - Stop a VSI's rings
|
||||
* @vsi: the VSI being configured
|
||||
**/
|
||||
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
|
||||
{
|
||||
struct i40e_pf *pf = vsi->back;
|
||||
int pf_q, err, q_end;
|
||||
|
||||
/* When port TX is suspended, don't wait */
|
||||
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
|
||||
return i40e_vsi_stop_rings_no_wait(vsi);
|
||||
|
||||
/* do rx first for enable and last for disable
|
||||
* Ignore return value, we need to shutdown whatever we can
|
||||
*/
|
||||
i40e_vsi_control_tx(vsi, false);
|
||||
i40e_vsi_control_rx(vsi, false);
|
||||
q_end = vsi->base_queue + vsi->num_queue_pairs;
|
||||
for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
|
||||
i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
|
||||
|
||||
for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
|
||||
err = i40e_control_wait_rx_q(pf, pf_q, false);
|
||||
if (err)
|
||||
dev_info(&pf->pdev->dev,
|
||||
"VSI seid %d Rx ring %d dissable timeout\n",
|
||||
vsi->seid, pf_q);
|
||||
}
|
||||
|
||||
msleep(I40E_DISABLE_TX_GAP_MSEC);
|
||||
pf_q = vsi->base_queue;
|
||||
for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
|
||||
wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
|
||||
|
||||
i40e_vsi_wait_queues_disabled(vsi);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -7280,6 +7290,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
|
|||
}
|
||||
if (vsi->num_queue_pairs <
|
||||
(mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
|
||||
dev_err(&vsi->back->pdev->dev,
|
||||
"Failed to create traffic channel, insufficient number of queues.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (sum_max_rate > i40e_get_link_speed(vsi)) {
|
||||
|
@ -13261,6 +13273,7 @@ static const struct net_device_ops i40e_netdev_ops = {
|
|||
.ndo_poll_controller = i40e_netpoll,
|
||||
#endif
|
||||
.ndo_setup_tc = __i40e_setup_tc,
|
||||
.ndo_select_queue = i40e_lan_select_queue,
|
||||
.ndo_set_features = i40e_set_features,
|
||||
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
|
||||
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
|
||||
|
|
|
@ -3631,6 +3631,56 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
|
|||
return -1;
|
||||
}
|
||||
|
||||
static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
|
||||
const struct sk_buff *skb,
|
||||
u16 num_tx_queues)
|
||||
{
|
||||
u32 jhash_initval_salt = 0xd631614b;
|
||||
u32 hash;
|
||||
|
||||
if (skb->sk && skb->sk->sk_hash)
|
||||
hash = skb->sk->sk_hash;
|
||||
else
|
||||
hash = (__force u16)skb->protocol ^ skb->hash;
|
||||
|
||||
hash = jhash_1word(hash, jhash_initval_salt);
|
||||
|
||||
return (u16)(((u64)hash * num_tx_queues) >> 32);
|
||||
}
|
||||
|
||||
u16 i40e_lan_select_queue(struct net_device *netdev,
|
||||
struct sk_buff *skb,
|
||||
struct net_device __always_unused *sb_dev)
|
||||
{
|
||||
struct i40e_netdev_priv *np = netdev_priv(netdev);
|
||||
struct i40e_vsi *vsi = np->vsi;
|
||||
struct i40e_hw *hw;
|
||||
u16 qoffset;
|
||||
u16 qcount;
|
||||
u8 tclass;
|
||||
u16 hash;
|
||||
u8 prio;
|
||||
|
||||
/* is DCB enabled at all? */
|
||||
if (vsi->tc_config.numtc == 1)
|
||||
return i40e_swdcb_skb_tx_hash(netdev, skb,
|
||||
netdev->real_num_tx_queues);
|
||||
|
||||
prio = skb->priority;
|
||||
hw = &vsi->back->hw;
|
||||
tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
|
||||
/* sanity check */
|
||||
if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
|
||||
tclass = 0;
|
||||
|
||||
/* select a queue assigned for the given TC */
|
||||
qcount = vsi->tc_config.tc_info[tclass].qcount;
|
||||
hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
|
||||
|
||||
qoffset = vsi->tc_config.tc_info[tclass].qoffset;
|
||||
return qoffset + hash;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
|
||||
* @xdpf: data to transmit
|
||||
|
|
|
@ -451,6 +451,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
|
|||
|
||||
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
|
||||
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
|
||||
u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
|
||||
struct net_device *sb_dev);
|
||||
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
|
||||
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
|
||||
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
|
||||
|
|
|
@ -1504,8 +1504,8 @@ static int cgx_lmac_init(struct cgx *cgx)
|
|||
|
||||
/* Add reference */
|
||||
cgx->lmac_idmap[lmac->lmac_id] = lmac;
|
||||
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
|
||||
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
|
||||
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
|
||||
}
|
||||
|
||||
return cgx_lmac_verify_fwi_version(cgx);
|
||||
|
|
|
@ -151,7 +151,10 @@ enum npc_kpu_lh_ltype {
|
|||
* Software assigns pkind for each incoming port such as CGX
|
||||
* Ethernet interfaces, LBK interfaces, etc.
|
||||
*/
|
||||
#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
|
||||
|
||||
enum npc_pkind_type {
|
||||
NPC_RX_LBK_PKIND = 0ULL,
|
||||
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
|
||||
NPC_RX_CHLEN24B_PKIND = 57ULL,
|
||||
NPC_RX_CPT_HDR_PKIND,
|
||||
|
|
|
@ -391,8 +391,10 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
|
|||
|
||||
/* Get numVFs attached to this PF and first HWVF */
|
||||
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
|
||||
*numvfs = (cfg >> 12) & 0xFF;
|
||||
*hwvf = cfg & 0xFFF;
|
||||
if (numvfs)
|
||||
*numvfs = (cfg >> 12) & 0xFF;
|
||||
if (hwvf)
|
||||
*hwvf = cfg & 0xFFF;
|
||||
}
|
||||
|
||||
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
|
||||
|
|
|
@ -196,11 +196,22 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
|
|||
{
|
||||
int err;
|
||||
|
||||
/*Sync all in flight RX packets to LLC/DRAM */
|
||||
/* Sync all in flight RX packets to LLC/DRAM */
|
||||
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
|
||||
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
|
||||
if (err)
|
||||
dev_err(rvu->dev, "NIX RX software sync failed\n");
|
||||
dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
|
||||
|
||||
/* SW_SYNC ensures all existing transactions are finished and pkts
|
||||
* are written to LLC/DRAM, queues should be teared down after
|
||||
* successful SW_SYNC. Due to a HW errata, in some rare scenarios
|
||||
* an existing transaction might end after SW_SYNC operation. To
|
||||
* ensure operation is fully done, do the SW_SYNC twice.
|
||||
*/
|
||||
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
|
||||
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
|
||||
if (err)
|
||||
dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
|
||||
}
|
||||
|
||||
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
|
||||
|
@ -298,6 +309,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
|
|||
rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
|
||||
pfvf->rx_chan_cnt = 1;
|
||||
pfvf->tx_chan_cnt = 1;
|
||||
rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
|
||||
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
|
||||
pfvf->rx_chan_base,
|
||||
pfvf->rx_chan_cnt);
|
||||
|
@ -3842,7 +3854,6 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
|
|||
vlan = &nix_hw->txvlan;
|
||||
kfree(vlan->rsrc.bmap);
|
||||
mutex_destroy(&vlan->rsrc_lock);
|
||||
devm_kfree(rvu->dev, vlan->entry2pfvf_map);
|
||||
|
||||
mcast = &nix_hw->mcast;
|
||||
qmem_free(rvu->dev, mcast->mce_ctx);
|
||||
|
|
|
@ -1721,7 +1721,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
|
|||
{
|
||||
struct rvu_hwinfo *hw = rvu->hw;
|
||||
int num_pkinds, num_kpus, idx;
|
||||
struct npc_pkind *pkind;
|
||||
|
||||
/* Disable all KPUs and their entries */
|
||||
for (idx = 0; idx < hw->npc_kpus; idx++) {
|
||||
|
@ -1739,9 +1738,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
|
|||
* Check HW max count to avoid configuring junk or
|
||||
* writing to unsupported CSR addresses.
|
||||
*/
|
||||
pkind = &hw->pkind;
|
||||
num_pkinds = rvu->kpu.pkinds;
|
||||
num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
|
||||
num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
|
||||
|
||||
for (idx = 0; idx < num_pkinds; idx++)
|
||||
npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
|
||||
|
@ -1891,7 +1889,8 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
|
|||
if (npc_const1 & BIT_ULL(63))
|
||||
npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
|
||||
|
||||
pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL;
|
||||
pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
|
||||
hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
|
||||
hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
|
||||
hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
|
||||
hw->npc_intfs = npc_const & 0xFULL;
|
||||
|
@ -2002,6 +2001,10 @@ int rvu_npc_init(struct rvu *rvu)
|
|||
err = rvu_alloc_bitmap(&pkind->rsrc);
|
||||
if (err)
|
||||
return err;
|
||||
/* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0',
|
||||
* no need to configure PKIND for all LBKs separately.
|
||||
*/
|
||||
rvu_alloc_rsrc(&pkind->rsrc);
|
||||
|
||||
/* Allocate mem for pkind to PF and channel mapping info */
|
||||
pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
|
||||
|
|
|
@ -71,8 +71,8 @@ static int rvu_switch_install_rules(struct rvu *rvu)
|
|||
struct rvu_switch *rswitch = &rvu->rswitch;
|
||||
u16 start = rswitch->start_entry;
|
||||
struct rvu_hwinfo *hw = rvu->hw;
|
||||
int pf, vf, numvfs, hwvf;
|
||||
u16 pcifunc, entry = 0;
|
||||
int pf, vf, numvfs;
|
||||
int err;
|
||||
|
||||
for (pf = 1; pf < hw->total_pfs; pf++) {
|
||||
|
@ -110,8 +110,8 @@ static int rvu_switch_install_rules(struct rvu *rvu)
|
|||
|
||||
rswitch->entry2pcifunc[entry++] = pcifunc;
|
||||
|
||||
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
|
||||
for (vf = 0; vf < numvfs; vf++, hwvf++) {
|
||||
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
|
||||
for (vf = 0; vf < numvfs; vf++) {
|
||||
pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
|
||||
rvu_get_nix_blkaddr(rvu, pcifunc);
|
||||
|
||||
|
@ -198,7 +198,7 @@ void rvu_switch_disable(struct rvu *rvu)
|
|||
struct npc_mcam_free_entry_req free_req = { 0 };
|
||||
struct rvu_switch *rswitch = &rvu->rswitch;
|
||||
struct rvu_hwinfo *hw = rvu->hw;
|
||||
int pf, vf, numvfs, hwvf;
|
||||
int pf, vf, numvfs;
|
||||
struct msg_rsp rsp;
|
||||
u16 pcifunc;
|
||||
int err;
|
||||
|
@ -217,7 +217,8 @@ void rvu_switch_disable(struct rvu *rvu)
|
|||
"Reverting RX rule for PF%d failed(%d)\n",
|
||||
pf, err);
|
||||
|
||||
for (vf = 0; vf < numvfs; vf++, hwvf++) {
|
||||
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
|
||||
for (vf = 0; vf < numvfs; vf++) {
|
||||
pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
|
||||
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
|
||||
if (err)
|
||||
|
|
|
@ -924,12 +924,14 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
|
|||
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
|
||||
aq->cq.drop_ena = 1;
|
||||
|
||||
/* Enable receive CQ backpressure */
|
||||
aq->cq.bp_ena = 1;
|
||||
aq->cq.bpid = pfvf->bpid[0];
|
||||
if (!is_otx2_lbkvf(pfvf->pdev)) {
|
||||
/* Enable receive CQ backpressure */
|
||||
aq->cq.bp_ena = 1;
|
||||
aq->cq.bpid = pfvf->bpid[0];
|
||||
|
||||
/* Set backpressure level is same as cq pass level */
|
||||
aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
|
||||
/* Set backpressure level is same as cq pass level */
|
||||
aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
|
||||
}
|
||||
}
|
||||
|
||||
/* Fill AQ info */
|
||||
|
@ -1186,7 +1188,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
|
|||
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
|
||||
|
||||
/* Enable backpressure for RQ aura */
|
||||
if (aura_id < pfvf->hw.rqpool_cnt) {
|
||||
if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
|
||||
aq->aura.bp_ena = 0;
|
||||
aq->aura.nix0_bpid = pfvf->bpid[0];
|
||||
/* Set backpressure level for RQ's Aura */
|
||||
|
|
|
@ -298,15 +298,14 @@ static int otx2_set_channels(struct net_device *dev,
|
|||
err = otx2_set_real_num_queues(dev, channel->tx_count,
|
||||
channel->rx_count);
|
||||
if (err)
|
||||
goto fail;
|
||||
return err;
|
||||
|
||||
pfvf->hw.rx_queues = channel->rx_count;
|
||||
pfvf->hw.tx_queues = channel->tx_count;
|
||||
pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
|
||||
|
||||
fail:
|
||||
if (if_up)
|
||||
dev->netdev_ops->ndo_open(dev);
|
||||
err = dev->netdev_ops->ndo_open(dev);
|
||||
|
||||
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
|
||||
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
|
||||
|
@ -410,7 +409,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
|
|||
qs->rqe_cnt = rx_count;
|
||||
|
||||
if (if_up)
|
||||
netdev->netdev_ops->ndo_open(netdev);
|
||||
return netdev->netdev_ops->ndo_open(netdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1662,6 +1662,7 @@ int otx2_open(struct net_device *netdev)
|
|||
err_tx_stop_queues:
|
||||
netif_tx_stop_all_queues(netdev);
|
||||
netif_carrier_off(netdev);
|
||||
pf->flags |= OTX2_FLAG_INTF_DOWN;
|
||||
err_free_cints:
|
||||
otx2_free_cints(pf, qidx);
|
||||
vec = pci_irq_vector(pf->pdev,
|
||||
|
@ -1689,6 +1690,10 @@ int otx2_stop(struct net_device *netdev)
|
|||
struct otx2_rss_info *rss;
|
||||
int qidx, vec, wrk;
|
||||
|
||||
/* If the DOWN flag is set resources are already freed */
|
||||
if (pf->flags & OTX2_FLAG_INTF_DOWN)
|
||||
return 0;
|
||||
|
||||
netif_carrier_off(netdev);
|
||||
netif_tx_stop_all_queues(netdev);
|
||||
|
||||
|
|
|
@ -3535,6 +3535,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
|
|||
|
||||
if (!SRIOV_VALID_STATE(dev->flags)) {
|
||||
mlx4_err(dev, "Invalid SRIOV state\n");
|
||||
err = -EINVAL;
|
||||
goto err_close;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -500,10 +500,7 @@ static int next_phys_dev(struct device *dev, const void *data)
|
|||
return 1;
|
||||
}
|
||||
|
||||
/* This function is called with two flows:
|
||||
* 1. During initialization of mlx5_core_dev and we don't need to lock it.
|
||||
* 2. During LAG configure stage and caller holds &mlx5_intf_mutex.
|
||||
*/
|
||||
/* Must be called with intf_mutex held */
|
||||
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct auxiliary_device *adev;
|
||||
|
|
|
@ -471,6 +471,15 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
|
|||
param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
|
||||
}
|
||||
|
||||
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
|
||||
{
|
||||
bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
|
||||
MLX5_CAP_GEN(mdev, relaxed_ordering_write);
|
||||
|
||||
return ro && params->lro_en ?
|
||||
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
|
||||
}
|
||||
|
||||
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_xsk_param *xsk,
|
||||
|
@ -508,7 +517,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
|
|||
}
|
||||
|
||||
MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
|
||||
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
|
||||
MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
|
||||
MLX5_SET(wq, wq, log_wq_stride,
|
||||
mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
|
||||
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
|
||||
|
|
|
@ -482,8 +482,11 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
|
|||
params->log_sq_size = orig->log_sq_size;
|
||||
mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
|
||||
}
|
||||
if (test_bit(MLX5E_PTP_STATE_RX, c->state))
|
||||
/* RQ */
|
||||
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
|
||||
params->vlan_strip_disable = orig->vlan_strip_disable;
|
||||
mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
|
||||
}
|
||||
}
|
||||
|
||||
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
|
||||
|
@ -494,7 +497,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
|
|||
int err;
|
||||
|
||||
rq->wq_type = params->rq_wq_type;
|
||||
rq->pdev = mdev->device;
|
||||
rq->pdev = c->pdev;
|
||||
rq->netdev = priv->netdev;
|
||||
rq->priv = priv;
|
||||
rq->clock = &mdev->clock;
|
||||
|
|
|
@ -37,7 +37,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
|
|||
struct mlx5e_priv *priv = t->priv;
|
||||
|
||||
rq->wq_type = params->rq_wq_type;
|
||||
rq->pdev = mdev->device;
|
||||
rq->pdev = t->pdev;
|
||||
rq->netdev = priv->netdev;
|
||||
rq->priv = priv;
|
||||
rq->clock = &mdev->clock;
|
||||
|
|
|
@ -3384,7 +3384,7 @@ static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool en
|
|||
|
||||
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
|
||||
{
|
||||
int err = 0;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < chs->num; i++) {
|
||||
|
@ -3392,6 +3392,8 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
|
|||
if (err)
|
||||
return err;
|
||||
}
|
||||
if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
|
||||
return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -3829,6 +3831,24 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
features &= ~NETIF_F_HW_TLS_RX;
|
||||
if (netdev->features & NETIF_F_HW_TLS_RX)
|
||||
netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
|
||||
|
||||
features &= ~NETIF_F_HW_TLS_TX;
|
||||
if (netdev->features & NETIF_F_HW_TLS_TX)
|
||||
netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
|
||||
|
||||
features &= ~NETIF_F_NTUPLE;
|
||||
if (netdev->features & NETIF_F_NTUPLE)
|
||||
netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
|
||||
|
||||
return features;
|
||||
}
|
||||
|
||||
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
|
@ -3860,15 +3880,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
|
|||
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
|
||||
}
|
||||
|
||||
if (mlx5e_is_uplink_rep(priv)) {
|
||||
features &= ~NETIF_F_HW_TLS_RX;
|
||||
if (netdev->features & NETIF_F_HW_TLS_RX)
|
||||
netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
|
||||
|
||||
features &= ~NETIF_F_HW_TLS_TX;
|
||||
if (netdev->features & NETIF_F_HW_TLS_TX)
|
||||
netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
|
||||
}
|
||||
if (mlx5e_is_uplink_rep(priv))
|
||||
features = mlx5e_fix_uplink_rep_features(netdev, features);
|
||||
|
||||
mutex_unlock(&priv->state_lock);
|
||||
|
||||
|
@ -4859,6 +4872,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
|
|||
if (MLX5_CAP_ETH(mdev, scatter_fcs))
|
||||
netdev->hw_features |= NETIF_F_RXFCS;
|
||||
|
||||
if (mlx5_qos_is_supported(mdev))
|
||||
netdev->hw_features |= NETIF_F_HW_TC;
|
||||
|
||||
netdev->features = netdev->hw_features;
|
||||
|
||||
/* Defaults */
|
||||
|
@ -4879,8 +4895,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
|
|||
netdev->hw_features |= NETIF_F_NTUPLE;
|
||||
#endif
|
||||
}
|
||||
if (mlx5_qos_is_supported(mdev))
|
||||
netdev->features |= NETIF_F_HW_TC;
|
||||
|
||||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
|
||||
|
|
|
@ -452,12 +452,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
|
|||
static
|
||||
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
|
||||
{
|
||||
struct mlx5_core_dev *mdev;
|
||||
struct net_device *netdev;
|
||||
struct mlx5e_priv *priv;
|
||||
|
||||
netdev = __dev_get_by_index(net, ifindex);
|
||||
netdev = dev_get_by_index(net, ifindex);
|
||||
if (!netdev)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
priv = netdev_priv(netdev);
|
||||
return priv->mdev;
|
||||
mdev = priv->mdev;
|
||||
dev_put(netdev);
|
||||
|
||||
/* Mirred tc action holds a refcount on the ifindex net_device (see
|
||||
* net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
|
||||
* after dev_put(netdev), while we're in the context of adding a tc flow.
|
||||
*
|
||||
* The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
|
||||
* stored in a hairpin object, which exists until all flows, that refer to it, get
|
||||
* removed.
|
||||
*
|
||||
* On the other hand, after a hairpin object has been created, the peer net_device may
|
||||
* be removed/unbound while there are still some hairpin flows that are using it. This
|
||||
* case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
|
||||
* NETDEV_UNREGISTER event of the peer net_device.
|
||||
*/
|
||||
return mdev;
|
||||
}
|
||||
|
||||
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
|
||||
|
@ -666,6 +686,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
|
|||
|
||||
func_mdev = priv->mdev;
|
||||
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
|
||||
if (IS_ERR(peer_mdev)) {
|
||||
err = PTR_ERR(peer_mdev);
|
||||
goto create_pair_err;
|
||||
}
|
||||
|
||||
pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
|
||||
if (IS_ERR(pair)) {
|
||||
|
@ -804,6 +828,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
|
|||
int err;
|
||||
|
||||
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
|
||||
if (IS_ERR(peer_mdev)) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
|
||||
return PTR_ERR(peer_mdev);
|
||||
}
|
||||
|
||||
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
|
||||
return -EOPNOTSUPP;
|
||||
|
|
|
@ -636,7 +636,7 @@ struct esw_vport_tbl_namespace {
|
|||
};
|
||||
|
||||
struct mlx5_vport_tbl_attr {
|
||||
u16 chain;
|
||||
u32 chain;
|
||||
u16 prio;
|
||||
u16 vport;
|
||||
const struct esw_vport_tbl_namespace *vport_ns;
|
||||
|
|
|
@ -382,10 +382,11 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
|
|||
{
|
||||
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
|
||||
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
|
||||
dest[dest_idx].vport.vhca_id =
|
||||
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
|
||||
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
|
||||
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
|
||||
dest[dest_idx].vport.vhca_id =
|
||||
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
|
||||
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
|
||||
}
|
||||
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
|
||||
if (pkt_reformat) {
|
||||
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
|
||||
|
@ -2367,6 +2368,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
|
|||
|
||||
switch (event) {
|
||||
case ESW_OFFLOADS_DEVCOM_PAIR:
|
||||
if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
|
||||
break;
|
||||
|
||||
if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
|
||||
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
|
||||
break;
|
||||
|
|
|
@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
|
|||
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
|
||||
struct fs_prio *prio)
|
||||
{
|
||||
struct mlx5_flow_table *next_ft;
|
||||
struct mlx5_flow_table *next_ft, *first_ft;
|
||||
int err = 0;
|
||||
|
||||
/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
|
||||
|
||||
if (list_empty(&prio->node.children)) {
|
||||
first_ft = list_first_entry_or_null(&prio->node.children,
|
||||
struct mlx5_flow_table, node.list);
|
||||
if (!first_ft || first_ft->level > ft->level) {
|
||||
err = connect_prev_fts(dev, ft, prio);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
next_ft = find_next_chained_ft(prio);
|
||||
next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
|
||||
err = connect_fwd_rules(dev, ft, next_ft);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -2120,7 +2122,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
|
|||
node.list) == ft))
|
||||
return 0;
|
||||
|
||||
next_ft = find_next_chained_ft(prio);
|
||||
next_ft = find_next_ft(ft);
|
||||
err = connect_fwd_rules(dev, next_ft, ft);
|
||||
if (err)
|
||||
return err;
|
||||
|
|
|
@ -626,8 +626,16 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
|
|||
}
|
||||
fw_reporter_ctx.err_synd = health->synd;
|
||||
fw_reporter_ctx.miss_counter = health->miss_counter;
|
||||
devlink_health_report(health->fw_fatal_reporter,
|
||||
"FW fatal error reported", &fw_reporter_ctx);
|
||||
if (devlink_health_report(health->fw_fatal_reporter,
|
||||
"FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) {
|
||||
/* If recovery wasn't performed, due to grace period,
|
||||
* unload the driver. This ensures that the driver
|
||||
* closes all its resources and it is not subjected to
|
||||
* requests from the kernel.
|
||||
*/
|
||||
mlx5_core_err(dev, "Driver is in error state. Unloading\n");
|
||||
mlx5_unload_one(dev);
|
||||
}
|
||||
}
|
||||
|
||||
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
|
||||
|
|
|
@ -29,7 +29,7 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
|
|||
*/
|
||||
};
|
||||
|
||||
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
|
||||
static void ionic_lif_rx_mode(struct ionic_lif *lif);
|
||||
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
|
||||
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
|
||||
static void ionic_link_status_check(struct ionic_lif *lif);
|
||||
|
@ -53,7 +53,19 @@ static void ionic_dim_work(struct work_struct *work)
|
|||
cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
|
||||
qcq = container_of(dim, struct ionic_qcq, dim);
|
||||
new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
|
||||
qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
|
||||
new_coal = new_coal ? new_coal : 1;
|
||||
|
||||
if (qcq->intr.dim_coal_hw != new_coal) {
|
||||
unsigned int qi = qcq->cq.bound_q->index;
|
||||
struct ionic_lif *lif = qcq->q.lif;
|
||||
|
||||
qcq->intr.dim_coal_hw = new_coal;
|
||||
|
||||
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
|
||||
lif->rxqcqs[qi]->intr.index,
|
||||
qcq->intr.dim_coal_hw);
|
||||
}
|
||||
|
||||
dim->state = DIM_START_MEASURE;
|
||||
}
|
||||
|
||||
|
@ -77,7 +89,7 @@ static void ionic_lif_deferred_work(struct work_struct *work)
|
|||
|
||||
switch (w->type) {
|
||||
case IONIC_DW_TYPE_RX_MODE:
|
||||
ionic_lif_rx_mode(lif, w->rx_mode);
|
||||
ionic_lif_rx_mode(lif);
|
||||
break;
|
||||
case IONIC_DW_TYPE_RX_ADDR_ADD:
|
||||
ionic_lif_addr_add(lif, w->addr);
|
||||
|
@ -1301,10 +1313,8 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
|
||||
bool can_sleep)
|
||||
static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
|
||||
{
|
||||
struct ionic_deferred_work *work;
|
||||
unsigned int nmfilters;
|
||||
unsigned int nufilters;
|
||||
|
||||
|
@ -1330,63 +1340,77 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
|
|||
lif->nucast--;
|
||||
}
|
||||
|
||||
if (!can_sleep) {
|
||||
work = kzalloc(sizeof(*work), GFP_ATOMIC);
|
||||
if (!work)
|
||||
return -ENOMEM;
|
||||
work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
|
||||
IONIC_DW_TYPE_RX_ADDR_DEL;
|
||||
memcpy(work->addr, addr, ETH_ALEN);
|
||||
netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
|
||||
add ? "add" : "del", addr);
|
||||
ionic_lif_deferred_enqueue(&lif->deferred, work);
|
||||
} else {
|
||||
netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
|
||||
add ? "add" : "del", addr);
|
||||
if (add)
|
||||
return ionic_lif_addr_add(lif, addr);
|
||||
else
|
||||
return ionic_lif_addr_del(lif, addr);
|
||||
}
|
||||
netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
|
||||
add ? "add" : "del", addr);
|
||||
if (add)
|
||||
return ionic_lif_addr_add(lif, addr);
|
||||
else
|
||||
return ionic_lif_addr_del(lif, addr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
|
||||
{
|
||||
return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
|
||||
}
|
||||
|
||||
static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
|
||||
{
|
||||
return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
|
||||
return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR);
|
||||
}
|
||||
|
||||
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
|
||||
{
|
||||
return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
|
||||
return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR);
|
||||
}
|
||||
|
||||
static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
|
||||
static void ionic_lif_rx_mode(struct ionic_lif *lif)
|
||||
{
|
||||
return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
|
||||
}
|
||||
|
||||
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
|
||||
{
|
||||
struct ionic_admin_ctx ctx = {
|
||||
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
|
||||
.cmd.rx_mode_set = {
|
||||
.opcode = IONIC_CMD_RX_MODE_SET,
|
||||
.lif_index = cpu_to_le16(lif->index),
|
||||
.rx_mode = cpu_to_le16(rx_mode),
|
||||
},
|
||||
};
|
||||
struct net_device *netdev = lif->netdev;
|
||||
unsigned int nfilters;
|
||||
unsigned int nd_flags;
|
||||
char buf[128];
|
||||
int err;
|
||||
u16 rx_mode;
|
||||
int i;
|
||||
#define REMAIN(__x) (sizeof(buf) - (__x))
|
||||
|
||||
mutex_lock(&lif->config_lock);
|
||||
|
||||
/* grab the flags once for local use */
|
||||
nd_flags = netdev->flags;
|
||||
|
||||
rx_mode = IONIC_RX_MODE_F_UNICAST;
|
||||
rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
|
||||
rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
|
||||
rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
|
||||
rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
|
||||
|
||||
/* sync unicast addresses
|
||||
* next check to see if we're in an overflow state
|
||||
* if so, we track that we overflowed and enable NIC PROMISC
|
||||
* else if the overflow is set and not needed
|
||||
* we remove our overflow flag and check the netdev flags
|
||||
* to see if we can disable NIC PROMISC
|
||||
*/
|
||||
__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
|
||||
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
|
||||
if (netdev_uc_count(netdev) + 1 > nfilters) {
|
||||
rx_mode |= IONIC_RX_MODE_F_PROMISC;
|
||||
lif->uc_overflow = true;
|
||||
} else if (lif->uc_overflow) {
|
||||
lif->uc_overflow = false;
|
||||
if (!(nd_flags & IFF_PROMISC))
|
||||
rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
|
||||
}
|
||||
|
||||
/* same for multicast */
|
||||
__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
|
||||
nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
|
||||
if (netdev_mc_count(netdev) > nfilters) {
|
||||
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
|
||||
lif->mc_overflow = true;
|
||||
} else if (lif->mc_overflow) {
|
||||
lif->mc_overflow = false;
|
||||
if (!(nd_flags & IFF_ALLMULTI))
|
||||
rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
|
||||
}
|
||||
|
||||
i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
|
||||
lif->rx_mode, rx_mode);
|
||||
if (rx_mode & IONIC_RX_MODE_F_UNICAST)
|
||||
|
@ -1399,79 +1423,48 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
|
|||
i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
|
||||
if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
|
||||
i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
|
||||
netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
|
||||
if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
|
||||
i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
|
||||
netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);
|
||||
|
||||
err = ionic_adminq_post_wait(lif, &ctx);
|
||||
if (err)
|
||||
netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
|
||||
rx_mode, err);
|
||||
else
|
||||
lif->rx_mode = rx_mode;
|
||||
if (lif->rx_mode != rx_mode) {
|
||||
struct ionic_admin_ctx ctx = {
|
||||
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
|
||||
.cmd.rx_mode_set = {
|
||||
.opcode = IONIC_CMD_RX_MODE_SET,
|
||||
.lif_index = cpu_to_le16(lif->index),
|
||||
},
|
||||
};
|
||||
int err;
|
||||
|
||||
ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
|
||||
err = ionic_adminq_post_wait(lif, &ctx);
|
||||
if (err)
|
||||
netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
|
||||
rx_mode, err);
|
||||
else
|
||||
lif->rx_mode = rx_mode;
|
||||
}
|
||||
|
||||
mutex_unlock(&lif->config_lock);
|
||||
}
|
||||
|
||||
static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
|
||||
{
|
||||
struct ionic_lif *lif = netdev_priv(netdev);
|
||||
struct ionic_deferred_work *work;
|
||||
unsigned int nfilters;
|
||||
unsigned int rx_mode;
|
||||
|
||||
rx_mode = IONIC_RX_MODE_F_UNICAST;
|
||||
rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
|
||||
rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
|
||||
rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
|
||||
rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
|
||||
|
||||
/* sync unicast addresses
|
||||
* next check to see if we're in an overflow state
|
||||
* if so, we track that we overflowed and enable NIC PROMISC
|
||||
* else if the overflow is set and not needed
|
||||
* we remove our overflow flag and check the netdev flags
|
||||
* to see if we can disable NIC PROMISC
|
||||
*/
|
||||
if (can_sleep)
|
||||
__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
|
||||
else
|
||||
__dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
|
||||
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
|
||||
if (netdev_uc_count(netdev) + 1 > nfilters) {
|
||||
rx_mode |= IONIC_RX_MODE_F_PROMISC;
|
||||
lif->uc_overflow = true;
|
||||
} else if (lif->uc_overflow) {
|
||||
lif->uc_overflow = false;
|
||||
if (!(netdev->flags & IFF_PROMISC))
|
||||
rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
|
||||
}
|
||||
|
||||
/* same for multicast */
|
||||
if (can_sleep)
|
||||
__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
|
||||
else
|
||||
__dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
|
||||
nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
|
||||
if (netdev_mc_count(netdev) > nfilters) {
|
||||
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
|
||||
lif->mc_overflow = true;
|
||||
} else if (lif->mc_overflow) {
|
||||
lif->mc_overflow = false;
|
||||
if (!(netdev->flags & IFF_ALLMULTI))
|
||||
rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
|
||||
}
|
||||
|
||||
if (lif->rx_mode != rx_mode) {
|
||||
if (!can_sleep) {
|
||||
work = kzalloc(sizeof(*work), GFP_ATOMIC);
|
||||
if (!work) {
|
||||
netdev_err(lif->netdev, "rxmode change dropped\n");
|
||||
return;
|
||||
}
|
||||
work->type = IONIC_DW_TYPE_RX_MODE;
|
||||
work->rx_mode = rx_mode;
|
||||
netdev_dbg(lif->netdev, "deferred: rx_mode\n");
|
||||
ionic_lif_deferred_enqueue(&lif->deferred, work);
|
||||
} else {
|
||||
ionic_lif_rx_mode(lif, rx_mode);
|
||||
if (!can_sleep) {
|
||||
work = kzalloc(sizeof(*work), GFP_ATOMIC);
|
||||
if (!work) {
|
||||
netdev_err(lif->netdev, "rxmode change dropped\n");
|
||||
return;
|
||||
}
|
||||
work->type = IONIC_DW_TYPE_RX_MODE;
|
||||
netdev_dbg(lif->netdev, "deferred: rx_mode\n");
|
||||
ionic_lif_deferred_enqueue(&lif->deferred, work);
|
||||
} else {
|
||||
ionic_lif_rx_mode(lif);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3058,6 +3051,7 @@ void ionic_lif_deinit(struct ionic_lif *lif)
|
|||
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
|
||||
ionic_lif_qcq_deinit(lif, lif->adminqcq);
|
||||
|
||||
mutex_destroy(&lif->config_lock);
|
||||
mutex_destroy(&lif->queue_lock);
|
||||
ionic_lif_reset(lif);
|
||||
}
|
||||
|
@ -3185,7 +3179,7 @@ static int ionic_station_set(struct ionic_lif *lif)
|
|||
*/
|
||||
if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
|
||||
netdev->dev_addr))
|
||||
ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
|
||||
ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
|
||||
} else {
|
||||
/* Update the netdev mac with the device's mac */
|
||||
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
|
||||
|
@ -3202,7 +3196,7 @@ static int ionic_station_set(struct ionic_lif *lif)
|
|||
|
||||
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
|
||||
netdev->dev_addr);
|
||||
ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
|
||||
ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -3225,6 +3219,7 @@ int ionic_lif_init(struct ionic_lif *lif)
|
|||
|
||||
lif->hw_index = le16_to_cpu(comp.hw_index);
|
||||
mutex_init(&lif->queue_lock);
|
||||
mutex_init(&lif->config_lock);
|
||||
|
||||
/* now that we have the hw_index we can figure out our doorbell page */
|
||||
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
|
||||
|
|
|
@ -108,7 +108,6 @@ struct ionic_deferred_work {
|
|||
struct list_head list;
|
||||
enum ionic_deferred_work_type type;
|
||||
union {
|
||||
unsigned int rx_mode;
|
||||
u8 addr[ETH_ALEN];
|
||||
u8 fw_status;
|
||||
};
|
||||
|
@ -179,6 +178,7 @@ struct ionic_lif {
|
|||
unsigned int index;
|
||||
unsigned int hw_index;
|
||||
struct mutex queue_lock; /* lock for queue structures */
|
||||
struct mutex config_lock; /* lock for config actions */
|
||||
spinlock_t adminq_lock; /* lock for AdminQ operations */
|
||||
struct ionic_qcq *adminqcq;
|
||||
struct ionic_qcq *notifyqcq;
|
||||
|
@ -199,7 +199,7 @@ struct ionic_lif {
|
|||
unsigned int nrxq_descs;
|
||||
u32 rx_copybreak;
|
||||
u64 rxq_features;
|
||||
unsigned int rx_mode;
|
||||
u16 rx_mode;
|
||||
u64 hw_features;
|
||||
bool registered;
|
||||
bool mc_overflow;
|
||||
|
@ -302,7 +302,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
|
|||
int ionic_lif_size(struct ionic *ionic);
|
||||
|
||||
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
|
||||
int ionic_lif_hwstamp_replay(struct ionic_lif *lif);
|
||||
void ionic_lif_hwstamp_replay(struct ionic_lif *lif);
|
||||
int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
|
||||
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
|
||||
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
|
||||
|
@ -311,10 +311,7 @@ void ionic_lif_unregister_phc(struct ionic_lif *lif);
|
|||
void ionic_lif_alloc_phc(struct ionic_lif *lif);
|
||||
void ionic_lif_free_phc(struct ionic_lif *lif);
|
||||
#else
|
||||
static inline int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {}
|
||||
|
||||
static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
|
||||
{
|
||||
|
|
|
@ -188,6 +188,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
|
|||
struct hwtstamp_config config;
|
||||
int err;
|
||||
|
||||
if (!lif->phc || !lif->phc->ptp)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
|
||||
return -EFAULT;
|
||||
|
||||
|
@ -203,15 +206,16 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
|
||||
void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!lif->phc || !lif->phc->ptp)
|
||||
return;
|
||||
|
||||
err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
|
||||
if (err)
|
||||
netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
|
||||
|
|
|
@ -274,12 +274,11 @@ static void ionic_rx_clean(struct ionic_queue *q,
|
|||
}
|
||||
}
|
||||
|
||||
if (likely(netdev->features & NETIF_F_RXCSUM)) {
|
||||
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
|
||||
skb->ip_summed = CHECKSUM_COMPLETE;
|
||||
skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
|
||||
stats->csum_complete++;
|
||||
}
|
||||
if (likely(netdev->features & NETIF_F_RXCSUM) &&
|
||||
(comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
|
||||
skb->ip_summed = CHECKSUM_COMPLETE;
|
||||
skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
|
||||
stats->csum_complete++;
|
||||
} else {
|
||||
stats->csum_none++;
|
||||
}
|
||||
|
@ -451,11 +450,12 @@ void ionic_rx_empty(struct ionic_queue *q)
|
|||
q->tail_idx = 0;
|
||||
}
|
||||
|
||||
static void ionic_dim_update(struct ionic_qcq *qcq)
|
||||
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
|
||||
{
|
||||
struct dim_sample dim_sample;
|
||||
struct ionic_lif *lif;
|
||||
unsigned int qi;
|
||||
u64 pkts, bytes;
|
||||
|
||||
if (!qcq->intr.dim_coal_hw)
|
||||
return;
|
||||
|
@ -463,14 +463,23 @@ static void ionic_dim_update(struct ionic_qcq *qcq)
|
|||
lif = qcq->q.lif;
|
||||
qi = qcq->cq.bound_q->index;
|
||||
|
||||
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
|
||||
lif->rxqcqs[qi]->intr.index,
|
||||
qcq->intr.dim_coal_hw);
|
||||
switch (napi_mode) {
|
||||
case IONIC_LIF_F_TX_DIM_INTR:
|
||||
pkts = lif->txqstats[qi].pkts;
|
||||
bytes = lif->txqstats[qi].bytes;
|
||||
break;
|
||||
case IONIC_LIF_F_RX_DIM_INTR:
|
||||
pkts = lif->rxqstats[qi].pkts;
|
||||
bytes = lif->rxqstats[qi].bytes;
|
||||
break;
|
||||
default:
|
||||
pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
|
||||
bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
|
||||
break;
|
||||
}
|
||||
|
||||
dim_update_sample(qcq->cq.bound_intr->rearm_count,
|
||||
lif->txqstats[qi].pkts,
|
||||
lif->txqstats[qi].bytes,
|
||||
&dim_sample);
|
||||
pkts, bytes, &dim_sample);
|
||||
|
||||
net_dim(&qcq->dim, dim_sample);
|
||||
}
|
||||
|
@ -491,7 +500,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
|
|||
ionic_tx_service, NULL, NULL);
|
||||
|
||||
if (work_done < budget && napi_complete_done(napi, work_done)) {
|
||||
ionic_dim_update(qcq);
|
||||
ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
|
||||
flags |= IONIC_INTR_CRED_UNMASK;
|
||||
cq->bound_intr->rearm_count++;
|
||||
}
|
||||
|
@ -530,7 +539,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
|
|||
ionic_rx_fill(cq->bound_q);
|
||||
|
||||
if (work_done < budget && napi_complete_done(napi, work_done)) {
|
||||
ionic_dim_update(qcq);
|
||||
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
|
||||
flags |= IONIC_INTR_CRED_UNMASK;
|
||||
cq->bound_intr->rearm_count++;
|
||||
}
|
||||
|
@ -576,7 +585,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
|
|||
ionic_rx_fill(rxcq->bound_q);
|
||||
|
||||
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
|
||||
ionic_dim_update(qcq);
|
||||
ionic_dim_update(qcq, 0);
|
||||
flags |= IONIC_INTR_CRED_UNMASK;
|
||||
rxcq->bound_intr->rearm_count++;
|
||||
}
|
||||
|
|
|
@ -831,7 +831,7 @@ int qede_configure_vlan_filters(struct qede_dev *edev)
|
|||
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct qede_dev *edev = netdev_priv(dev);
|
||||
struct qede_vlan *vlan = NULL;
|
||||
struct qede_vlan *vlan;
|
||||
int rc = 0;
|
||||
|
||||
DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
|
||||
|
@ -842,7 +842,7 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
|
|||
if (vlan->vid == vid)
|
||||
break;
|
||||
|
||||
if (!vlan || (vlan->vid != vid)) {
|
||||
if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
|
||||
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
|
||||
"Vlan isn't configured\n");
|
||||
goto out;
|
||||
|
|
|
@@ -154,7 +154,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 				      "driver lock acquired\n");
 			return 1;
 		}
-		ssleep(1);
+		mdelay(1000);
 	} while (++i < 10);

 	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
@@ -3274,7 +3274,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
 		if ((value & ISP_CONTROL_SR) == 0)
 			break;

-		ssleep(1);
+		mdelay(1000);
 	} while ((--max_wait_time));

 	/*
@@ -3310,7 +3310,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
 			   ispControlStatus);
 			if ((value & ISP_CONTROL_FSR) == 0)
 				break;
-			ssleep(1);
+			mdelay(1000);
 		} while ((--max_wait_time));
 	}
 	if (max_wait_time == 0)
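The one-line changes above are not cosmetic: ssleep() may schedule, and these polling loops appear to run in atomic context (with a spinlock held), where sleeping triggers a "scheduling while atomic" splat, so mdelay() busy-waits instead. A minimal sketch of the rule, with hypothetical names:

/* Hedged sketch (not driver code): why a sleeping delay cannot be used
 * inside a spinlocked polling loop.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hw_lock);		/* hypothetical lock */

static int wait_for_hw_flag(volatile int *flag)	/* hypothetical helper */
{
	int i = 0;

	spin_lock(&hw_lock);
	do {
		if (*flag)
			break;
		/* ssleep()/msleep() would schedule here and trip
		 * "BUG: scheduling while atomic"; mdelay() spins. */
		mdelay(1000);
	} while (++i < 10);
	spin_unlock(&hw_lock);

	return *flag ? 0 : -ETIMEDOUT;
}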
@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
|
|||
#endif
|
||||
|
||||
/* setup various bits in PCI command register */
|
||||
ret = pci_enable_device(pci_dev);
|
||||
ret = pcim_enable_device(pci_dev);
|
||||
if(ret) return ret;
|
||||
|
||||
i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
|
||||
|
@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
|
|||
ioaddr = pci_iomap(pci_dev, 0, 0);
|
||||
if (!ioaddr) {
|
||||
ret = -ENOMEM;
|
||||
goto err_out_cleardev;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
sis_priv = netdev_priv(net_dev);
|
||||
|
@ -581,8 +581,6 @@ static int sis900_probe(struct pci_dev *pci_dev,
|
|||
sis_priv->tx_ring_dma);
|
||||
err_out_unmap:
|
||||
pci_iounmap(pci_dev, ioaddr);
|
||||
err_out_cleardev:
|
||||
pci_release_regions(pci_dev);
|
||||
err_out:
|
||||
free_netdev(net_dev);
|
||||
return ret;
|
||||
|
@ -2499,7 +2497,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
|
|||
sis_priv->tx_ring_dma);
|
||||
pci_iounmap(pci_dev, sis_priv->ioaddr);
|
||||
free_netdev(net_dev);
|
||||
pci_release_regions(pci_dev);
|
||||
}
|
||||
|
||||
static int __maybe_unused sis900_suspend(struct device *dev)
|
||||
|
|
|
@ -1249,6 +1249,7 @@ const struct stmmac_ops dwmac410_ops = {
|
|||
.config_l3_filter = dwmac4_config_l3_filter,
|
||||
.config_l4_filter = dwmac4_config_l4_filter,
|
||||
.est_configure = dwmac5_est_configure,
|
||||
.est_irq_status = dwmac5_est_irq_status,
|
||||
.fpe_configure = dwmac5_fpe_configure,
|
||||
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
|
||||
.fpe_irq_status = dwmac5_fpe_irq_status,
|
||||
|
@ -1300,6 +1301,7 @@ const struct stmmac_ops dwmac510_ops = {
|
|||
.config_l3_filter = dwmac4_config_l3_filter,
|
||||
.config_l4_filter = dwmac4_config_l4_filter,
|
||||
.est_configure = dwmac5_est_configure,
|
||||
.est_irq_status = dwmac5_est_irq_status,
|
||||
.fpe_configure = dwmac5_fpe_configure,
|
||||
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
|
||||
.fpe_irq_status = dwmac5_fpe_irq_status,
|
||||
|
|
|
@@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
 		err = niu_pci_vpd_scan_props(np, here, end);
 		if (err < 0)
 			return err;
+		/* ret == 1 is not an error */
 		if (err == 1)
-			return -EINVAL;
+			return 0;
 	}
 	return 0;
 }
@@ -288,7 +288,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
 	if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
 		if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E ||
 		    BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
-		    BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E)
+		    BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
 			val |= BCM54XX_SHD_SCR3_RXCTXC_DIS;
 		else
 			val |= BCM54XX_SHD_SCR3_TRDDAPD;
@ -136,6 +136,29 @@ static struct ieee80211_supported_band band_5ghz = {
|
|||
/* Assigned at module init. Guaranteed locally-administered and unicast. */
|
||||
static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
|
||||
|
||||
static void virt_wifi_inform_bss(struct wiphy *wiphy)
|
||||
{
|
||||
u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
|
||||
struct cfg80211_bss *informed_bss;
|
||||
static const struct {
|
||||
u8 tag;
|
||||
u8 len;
|
||||
u8 ssid[8];
|
||||
} __packed ssid = {
|
||||
.tag = WLAN_EID_SSID,
|
||||
.len = 8,
|
||||
.ssid = "VirtWifi",
|
||||
};
|
||||
|
||||
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
|
||||
CFG80211_BSS_FTYPE_PRESP,
|
||||
fake_router_bssid, tsf,
|
||||
WLAN_CAPABILITY_ESS, 0,
|
||||
(void *)&ssid, sizeof(ssid),
|
||||
DBM_TO_MBM(-50), GFP_KERNEL);
|
||||
cfg80211_put_bss(wiphy, informed_bss);
|
||||
}
|
||||
|
||||
/* Called with the rtnl lock held. */
|
||||
static int virt_wifi_scan(struct wiphy *wiphy,
|
||||
struct cfg80211_scan_request *request)
|
||||
|
@ -156,28 +179,13 @@ static int virt_wifi_scan(struct wiphy *wiphy,
|
|||
/* Acquires and releases the rdev BSS lock. */
|
||||
static void virt_wifi_scan_result(struct work_struct *work)
|
||||
{
|
||||
struct {
|
||||
u8 tag;
|
||||
u8 len;
|
||||
u8 ssid[8];
|
||||
} __packed ssid = {
|
||||
.tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi",
|
||||
};
|
||||
struct cfg80211_bss *informed_bss;
|
||||
struct virt_wifi_wiphy_priv *priv =
|
||||
container_of(work, struct virt_wifi_wiphy_priv,
|
||||
scan_result.work);
|
||||
struct wiphy *wiphy = priv_to_wiphy(priv);
|
||||
struct cfg80211_scan_info scan_info = { .aborted = false };
|
||||
u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
|
||||
|
||||
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
|
||||
CFG80211_BSS_FTYPE_PRESP,
|
||||
fake_router_bssid, tsf,
|
||||
WLAN_CAPABILITY_ESS, 0,
|
||||
(void *)&ssid, sizeof(ssid),
|
||||
DBM_TO_MBM(-50), GFP_KERNEL);
|
||||
cfg80211_put_bss(wiphy, informed_bss);
|
||||
virt_wifi_inform_bss(wiphy);
|
||||
|
||||
/* Schedules work which acquires and releases the rtnl lock. */
|
||||
cfg80211_scan_done(priv->scan_request, &scan_info);
|
||||
|
@ -225,10 +233,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
|
|||
if (!could_schedule)
|
||||
return -EBUSY;
|
||||
|
||||
if (sme->bssid)
|
||||
if (sme->bssid) {
|
||||
ether_addr_copy(priv->connect_requested_bss, sme->bssid);
|
||||
else
|
||||
} else {
|
||||
virt_wifi_inform_bss(wiphy);
|
||||
eth_zero_addr(priv->connect_requested_bss);
|
||||
}
|
||||
|
||||
wiphy_debug(wiphy, "connect\n");
|
||||
|
||||
|
@ -241,11 +251,13 @@ static void virt_wifi_connect_complete(struct work_struct *work)
|
|||
struct virt_wifi_netdev_priv *priv =
|
||||
container_of(work, struct virt_wifi_netdev_priv, connect.work);
|
||||
u8 *requested_bss = priv->connect_requested_bss;
|
||||
bool has_addr = !is_zero_ether_addr(requested_bss);
|
||||
bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
|
||||
u16 status = WLAN_STATUS_SUCCESS;
|
||||
|
||||
if (!priv->is_up || (has_addr && !right_addr))
|
||||
if (is_zero_ether_addr(requested_bss))
|
||||
requested_bss = NULL;
|
||||
|
||||
if (!priv->is_up || (requested_bss && !right_addr))
|
||||
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
|
||||
else
|
||||
priv->is_connected = true;
|
||||
|
|
|
@ -984,6 +984,8 @@ static void wwan_create_default_link(struct wwan_device *wwandev,
|
|||
goto unlock;
|
||||
}
|
||||
|
||||
rtnl_configure_link(dev, NULL); /* Link initialized, notify new link */
|
||||
|
||||
unlock:
|
||||
rtnl_unlock();
|
||||
|
||||
|
|
|
@ -192,8 +192,7 @@ static void nfcsim_recv_wq(struct work_struct *work)
|
|||
|
||||
if (!IS_ERR(skb))
|
||||
dev_kfree_skb(skb);
|
||||
|
||||
skb = ERR_PTR(-ENODEV);
|
||||
return;
|
||||
}
|
||||
|
||||
dev->cb(dev->nfc_digital_dev, dev->arg, skb);
|
||||
|
|
|
@ -423,7 +423,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
|
|||
if (IS_ERR(tfm)) {
|
||||
ret = PTR_ERR(tfm);
|
||||
dev_err(&fw_info->ndev->nfc_dev->dev,
|
||||
"Cannot allocate shash (code=%d)\n", ret);
|
||||
"Cannot allocate shash (code=%pe)\n", tfm);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
|
|
@@ -134,4 +134,5 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
 BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
 #ifdef CONFIG_NET
 BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
 #endif
@ -340,8 +340,8 @@ struct bpf_insn_aux_data {
|
|||
};
|
||||
u64 map_key_state; /* constant (32 bit) key tracking for maps */
|
||||
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
|
||||
int sanitize_stack_off; /* stack slot to be cleared */
|
||||
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
|
||||
bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
|
||||
bool zext_dst; /* this insn zero extends dst reg */
|
||||
u8 alu_state; /* used in combination with alu_limit */
|
||||
|
||||
|
@ -414,6 +414,7 @@ struct bpf_verifier_env {
|
|||
u32 used_map_cnt; /* number of used maps */
|
||||
u32 used_btf_cnt; /* number of used BTF objects */
|
||||
u32 id_gen; /* used to generate unique reg IDs */
|
||||
bool explore_alu_limits;
|
||||
bool allow_ptr_leaks;
|
||||
bool allow_uninit_stack;
|
||||
bool allow_ptr_to_map_access;
|
||||
|
|
|
@@ -73,6 +73,11 @@ struct ctl_table_header;
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS	0xe0

+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC	0xc0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -390,6 +395,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 		.off   = 0,					\
 		.imm   = 0 })

+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC()						\
+	((struct bpf_insn) {					\
+		.code  = BPF_ST | BPF_NOSPEC,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
 /* Internal classic blocks for direct assignment */

 #define __BPF_STMT(CODE, K) \
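To show how the new opcode is meant to be used, here is a small sketch built only from the filter.h macros visible above plus the existing BPF_ST_MEM()/BPF_LDX_MEM() helpers; the register and offset choices are illustrative. The barrier sits between a store and a later load from the same stack slot so the load cannot speculatively bypass the store (Spectre v4, speculative store bypass):

/* Hedged sketch: ordering a stack store against a following load with
 * the new speculation barrier instruction.
 */
struct bpf_insn example[] = {
	/* fp[-8] = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),
	/* barrier: the store above must not be speculatively bypassed */
	BPF_ST_NOSPEC(),
	/* r1 = fp[-8]; cannot observe a stale slot value any more */
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, -8),
};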
@ -285,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk)
|
|||
return rcu_dereference_sk_user_data(sk);
|
||||
}
|
||||
|
||||
static inline void sk_psock_set_state(struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
set_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline void sk_psock_clear_state(struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
clear_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline bool sk_psock_test_state(const struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
return test_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
sk_drops_add(sk, skb);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg)
|
||||
{
|
||||
if (msg->skb)
|
||||
sock_drop(psock->sk, msg->skb);
|
||||
kfree(msg);
|
||||
}
|
||||
|
||||
static inline void sk_psock_queue_msg(struct sk_psock *psock,
|
||||
struct sk_msg *msg)
|
||||
{
|
||||
spin_lock_bh(&psock->ingress_lock);
|
||||
list_add_tail(&msg->list, &psock->ingress_msg);
|
||||
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
|
||||
list_add_tail(&msg->list, &psock->ingress_msg);
|
||||
else
|
||||
drop_sk_msg(psock, msg);
|
||||
spin_unlock_bh(&psock->ingress_lock);
|
||||
}
|
||||
|
||||
|
@ -406,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk,
|
|||
psock->psock_update_sk_prot(sk, psock, true);
|
||||
}
|
||||
|
||||
static inline void sk_psock_set_state(struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
set_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline void sk_psock_clear_state(struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
clear_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline bool sk_psock_test_state(const struct sk_psock *psock,
|
||||
enum sk_psock_state_bits bit)
|
||||
{
|
||||
return test_bit(bit, &psock->state);
|
||||
}
|
||||
|
||||
static inline struct sk_psock *sk_psock_get(struct sock *sk)
|
||||
{
|
||||
struct sk_psock *psock;
|
||||
|
|
|
@ -15,9 +15,11 @@
|
|||
#include <linux/if_ether.h>
|
||||
|
||||
/* Lengths of frame formats */
|
||||
#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
|
||||
#define LLC_PDU_LEN_S 4
|
||||
#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
|
||||
#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
|
||||
#define LLC_PDU_LEN_S 4
|
||||
#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
|
||||
/* header and 1 control byte and XID info */
|
||||
#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
|
||||
/* Known SAP addresses */
|
||||
#define LLC_GLOBAL_SAP 0xFF
|
||||
#define LLC_NULL_SAP 0x00 /* not network-layer visible */
|
||||
|
@ -50,9 +52,10 @@
|
|||
#define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */
|
||||
#define LLC_PDU_TYPE_MASK 0x03
|
||||
|
||||
#define LLC_PDU_TYPE_I 0 /* first bit */
|
||||
#define LLC_PDU_TYPE_S 1 /* first two bits */
|
||||
#define LLC_PDU_TYPE_U 3 /* first two bits */
|
||||
#define LLC_PDU_TYPE_I 0 /* first bit */
|
||||
#define LLC_PDU_TYPE_S 1 /* first two bits */
|
||||
#define LLC_PDU_TYPE_U 3 /* first two bits */
|
||||
#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */
|
||||
|
||||
#define LLC_PDU_TYPE_IS_I(pdu) \
|
||||
((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
|
||||
|
@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
|
|||
static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
|
||||
u8 ssap, u8 dsap, u8 cr)
|
||||
{
|
||||
const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
|
||||
int hlen = 4; /* default value for I and S types */
|
||||
struct llc_pdu_un *pdu;
|
||||
|
||||
switch (type) {
|
||||
case LLC_PDU_TYPE_U:
|
||||
hlen = 3;
|
||||
break;
|
||||
case LLC_PDU_TYPE_U_XID:
|
||||
hlen = 6;
|
||||
break;
|
||||
}
|
||||
|
||||
skb_push(skb, hlen);
|
||||
skb_reset_network_header(skb);
|
||||
pdu = llc_pdu_un_hdr(skb);
|
||||
|
@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
|
|||
xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */
|
||||
xid_info->type = svcs_supported;
|
||||
xid_info->rw = rx_window << 1; /* size of receive window */
|
||||
skb_put(skb, sizeof(struct llc_xid_info));
|
||||
|
||||
/* no need to push/put since llc_pdu_header_init() has already
|
||||
* pushed 3 + 3 bytes
|
||||
*/
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -984,6 +984,8 @@ struct sctp_transport {
 	} cacc;

 	struct {
+		__u32 last_rtx_chunks;
 		__u16 pmtu;
 		__u16 probe_size;
 		__u16 probe_high;
@@ -1024,8 +1025,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
 void sctp_transport_immediate_rtx(struct sctp_transport *);
 void sctp_transport_dst_release(struct sctp_transport *t);
 void sctp_transport_dst_confirm(struct sctp_transport *t);
-void sctp_transport_pl_send(struct sctp_transport *t);
-void sctp_transport_pl_recv(struct sctp_transport *t);
+bool sctp_transport_pl_send(struct sctp_transport *t);
+bool sctp_transport_pl_recv(struct sctp_transport *t);


 /* This is the structure we use to queue packets as they come into
@ -32,6 +32,8 @@
|
|||
#include <linux/perf_event.h>
|
||||
#include <linux/extable.h>
|
||||
#include <linux/log2.h>
|
||||
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
/* Registers */
|
||||
|
@ -1377,6 +1379,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
|
|||
/* Non-UAPI available opcodes. */
|
||||
[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
|
||||
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
|
||||
[BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
|
||||
[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
|
||||
[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
|
||||
[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
|
||||
|
@ -1621,7 +1624,21 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
|
|||
COND_JMP(s, JSGE, >=)
|
||||
COND_JMP(s, JSLE, <=)
|
||||
#undef COND_JMP
|
||||
/* STX and ST and LDX*/
|
||||
/* ST, STX and LDX*/
|
||||
ST_NOSPEC:
|
||||
/* Speculation barrier for mitigating Speculative Store Bypass.
|
||||
* In case of arm64, we rely on the firmware mitigation as
|
||||
* controlled via the ssbd kernel parameter. Whenever the
|
||||
* mitigation is enabled, it works for all of the kernel code
|
||||
* with no need to provide any additional instructions here.
|
||||
* In case of x86, we use 'lfence' insn for mitigation. We
|
||||
* reuse preexisting logic from Spectre v1 mitigation that
|
||||
* happens to produce the required code on x86 for v4 as well.
|
||||
*/
|
||||
#ifdef CONFIG_X86
|
||||
barrier_nospec();
|
||||
#endif
|
||||
CONT;
|
||||
#define LDST(SIZEOP, SIZE) \
|
||||
STX_MEM_##SIZEOP: \
|
||||
*(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
|
||||
|
|
|
@ -206,15 +206,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
|
|||
verbose(cbs->private_data, "BUG_%02x\n", insn->code);
|
||||
}
|
||||
} else if (class == BPF_ST) {
|
||||
if (BPF_MODE(insn->code) != BPF_MEM) {
|
||||
if (BPF_MODE(insn->code) == BPF_MEM) {
|
||||
verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
|
||||
insn->code,
|
||||
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
|
||||
insn->dst_reg,
|
||||
insn->off, insn->imm);
|
||||
} else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
|
||||
verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
|
||||
} else {
|
||||
verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
|
||||
return;
|
||||
}
|
||||
verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
|
||||
insn->code,
|
||||
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
|
||||
insn->dst_reg,
|
||||
insn->off, insn->imm);
|
||||
} else if (class == BPF_LDX) {
|
||||
if (BPF_MODE(insn->code) != BPF_MEM) {
|
||||
verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
|
||||
|
|
|
@ -2610,6 +2610,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
|
|||
cur = env->cur_state->frame[env->cur_state->curframe];
|
||||
if (value_regno >= 0)
|
||||
reg = &cur->regs[value_regno];
|
||||
if (!env->bypass_spec_v4) {
|
||||
bool sanitize = reg && is_spillable_regtype(reg->type);
|
||||
|
||||
for (i = 0; i < size; i++) {
|
||||
if (state->stack[spi].slot_type[i] == STACK_INVALID) {
|
||||
sanitize = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (sanitize)
|
||||
env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
|
||||
}
|
||||
|
||||
if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
|
||||
!register_is_null(reg) && env->bpf_capable) {
|
||||
|
@ -2632,47 +2645,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
|
|||
verbose(env, "invalid size of register spill\n");
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
if (state != cur && reg->type == PTR_TO_STACK) {
|
||||
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!env->bypass_spec_v4) {
|
||||
bool sanitize = false;
|
||||
|
||||
if (state->stack[spi].slot_type[0] == STACK_SPILL &&
|
||||
register_is_const(&state->stack[spi].spilled_ptr))
|
||||
sanitize = true;
|
||||
for (i = 0; i < BPF_REG_SIZE; i++)
|
||||
if (state->stack[spi].slot_type[i] == STACK_MISC) {
|
||||
sanitize = true;
|
||||
break;
|
||||
}
|
||||
if (sanitize) {
|
||||
int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
|
||||
int soff = (-spi - 1) * BPF_REG_SIZE;
|
||||
|
||||
/* detected reuse of integer stack slot with a pointer
|
||||
* which means either llvm is reusing stack slot or
|
||||
* an attacker is trying to exploit CVE-2018-3639
|
||||
* (speculative store bypass)
|
||||
* Have to sanitize that slot with preemptive
|
||||
* store of zero.
|
||||
*/
|
||||
if (*poff && *poff != soff) {
|
||||
/* disallow programs where single insn stores
|
||||
* into two different stack slots, since verifier
|
||||
* cannot sanitize them
|
||||
*/
|
||||
verbose(env,
|
||||
"insn %d cannot access two stack slots fp%d and fp%d",
|
||||
insn_idx, *poff, soff);
|
||||
return -EINVAL;
|
||||
}
|
||||
*poff = soff;
|
||||
}
|
||||
}
|
||||
save_register_state(state, spi, reg);
|
||||
} else {
|
||||
u8 type = STACK_MISC;
|
||||
|
@ -6561,6 +6537,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
|
|||
alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
|
||||
alu_state |= ptr_is_dst_reg ?
|
||||
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
|
||||
|
||||
/* Limit pruning on unknown scalars to enable deep search for
|
||||
* potential masking differences from other program paths.
|
||||
*/
|
||||
if (!off_is_imm)
|
||||
env->explore_alu_limits = true;
|
||||
}
|
||||
|
||||
err = update_alu_sanitation_state(aux, alu_state, alu_limit);
|
||||
|
@ -9936,8 +9918,8 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn,
|
|||
}
|
||||
|
||||
/* Returns true if (rold safe implies rcur safe) */
|
||||
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
|
||||
struct bpf_id_pair *idmap)
|
||||
static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
|
||||
struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
|
||||
{
|
||||
bool equal;
|
||||
|
||||
|
@ -9963,6 +9945,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
|
|||
return false;
|
||||
switch (rold->type) {
|
||||
case SCALAR_VALUE:
|
||||
if (env->explore_alu_limits)
|
||||
return false;
|
||||
if (rcur->type == SCALAR_VALUE) {
|
||||
if (!rold->precise && !rcur->precise)
|
||||
return true;
|
||||
|
@ -10053,9 +10037,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool stacksafe(struct bpf_func_state *old,
|
||||
struct bpf_func_state *cur,
|
||||
struct bpf_id_pair *idmap)
|
||||
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
|
||||
struct bpf_func_state *cur, struct bpf_id_pair *idmap)
|
||||
{
|
||||
int i, spi;
|
||||
|
||||
|
@ -10100,9 +10083,8 @@ static bool stacksafe(struct bpf_func_state *old,
|
|||
continue;
|
||||
if (old->stack[spi].slot_type[0] != STACK_SPILL)
|
||||
continue;
|
||||
if (!regsafe(&old->stack[spi].spilled_ptr,
|
||||
&cur->stack[spi].spilled_ptr,
|
||||
idmap))
|
||||
if (!regsafe(env, &old->stack[spi].spilled_ptr,
|
||||
&cur->stack[spi].spilled_ptr, idmap))
|
||||
/* when explored and current stack slot are both storing
|
||||
* spilled registers, check that stored pointers types
|
||||
* are the same as well.
|
||||
|
@ -10159,10 +10141,11 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
|
|||
|
||||
memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
|
||||
for (i = 0; i < MAX_BPF_REG; i++)
|
||||
if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
|
||||
if (!regsafe(env, &old->regs[i], &cur->regs[i],
|
||||
env->idmap_scratch))
|
||||
return false;
|
||||
|
||||
if (!stacksafe(old, cur, env->idmap_scratch))
|
||||
if (!stacksafe(env, old, cur, env->idmap_scratch))
|
||||
return false;
|
||||
|
||||
if (!refsafe(old, cur))
|
||||
|
@ -11906,35 +11889,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
|
|||
|
||||
for (i = 0; i < insn_cnt; i++, insn++) {
|
||||
bpf_convert_ctx_access_t convert_ctx_access;
|
||||
bool ctx_access;
|
||||
|
||||
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
|
||||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
|
||||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
|
||||
insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
|
||||
insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
|
||||
type = BPF_READ;
|
||||
else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
|
||||
insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
|
||||
insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
|
||||
insn->code == (BPF_STX | BPF_MEM | BPF_DW))
|
||||
ctx_access = true;
|
||||
} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
|
||||
insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
|
||||
insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
|
||||
insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
|
||||
insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
|
||||
insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
|
||||
insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
|
||||
insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
|
||||
type = BPF_WRITE;
|
||||
else
|
||||
ctx_access = BPF_CLASS(insn->code) == BPF_STX;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (type == BPF_WRITE &&
|
||||
env->insn_aux_data[i + delta].sanitize_stack_off) {
|
||||
env->insn_aux_data[i + delta].sanitize_stack_spill) {
|
||||
struct bpf_insn patch[] = {
|
||||
/* Sanitize suspicious stack slot with zero.
|
||||
* There are no memory dependencies for this store,
|
||||
* since it's only using frame pointer and immediate
|
||||
* constant of zero
|
||||
*/
|
||||
BPF_ST_MEM(BPF_DW, BPF_REG_FP,
|
||||
env->insn_aux_data[i + delta].sanitize_stack_off,
|
||||
0),
|
||||
/* the original STX instruction will immediately
|
||||
* overwrite the same stack slot with appropriate value
|
||||
*/
|
||||
*insn,
|
||||
BPF_ST_NOSPEC(),
|
||||
};
|
||||
|
||||
cnt = ARRAY_SIZE(patch);
|
||||
|
@ -11948,6 +11929,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
|
|||
continue;
|
||||
}
|
||||
|
||||
if (!ctx_access)
|
||||
continue;
|
||||
|
||||
switch (env->insn_aux_data[i + delta].ptr_type) {
|
||||
case PTR_TO_CTX:
|
||||
if (!ops->convert_ctx_access)
|
||||
|
@ -12752,37 +12736,6 @@ static void free_states(struct bpf_verifier_env *env)
|
|||
}
|
||||
}
|
||||
|
||||
/* The verifier is using insn_aux_data[] to store temporary data during
|
||||
* verification and to store information for passes that run after the
|
||||
* verification like dead code sanitization. do_check_common() for subprogram N
|
||||
* may analyze many other subprograms. sanitize_insn_aux_data() clears all
|
||||
* temporary data after do_check_common() finds that subprogram N cannot be
|
||||
* verified independently. pass_cnt counts the number of times
|
||||
* do_check_common() was run and insn->aux->seen tells the pass number
|
||||
* insn_aux_data was touched. These variables are compared to clear temporary
|
||||
* data from failed pass. For testing and experiments do_check_common() can be
|
||||
* run multiple times even when prior attempt to verify is unsuccessful.
|
||||
*
|
||||
* Note that special handling is needed on !env->bypass_spec_v1 if this is
|
||||
* ever called outside of error path with subsequent program rejection.
|
||||
*/
|
||||
static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
|
||||
{
|
||||
struct bpf_insn *insn = env->prog->insnsi;
|
||||
struct bpf_insn_aux_data *aux;
|
||||
int i, class;
|
||||
|
||||
for (i = 0; i < env->prog->len; i++) {
|
||||
class = BPF_CLASS(insn[i].code);
|
||||
if (class != BPF_LDX && class != BPF_STX)
|
||||
continue;
|
||||
aux = &env->insn_aux_data[i];
|
||||
if (aux->seen != env->pass_cnt)
|
||||
continue;
|
||||
memset(aux, 0, offsetof(typeof(*aux), orig_idx));
|
||||
}
|
||||
}
|
||||
|
||||
static int do_check_common(struct bpf_verifier_env *env, int subprog)
|
||||
{
|
||||
bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
|
||||
|
@ -12859,9 +12812,6 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
|
|||
if (!ret && pop_log)
|
||||
bpf_vlog_reset(&env->log, 0);
|
||||
free_states(env);
|
||||
if (ret)
|
||||
/* clean aux data in case subprog was rejected */
|
||||
sanitize_insn_aux_data(env);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@@ -1075,11 +1075,16 @@ static bool j1939_session_deactivate_locked(struct j1939_session *session)

 static bool j1939_session_deactivate(struct j1939_session *session)
 {
+	struct j1939_priv *priv = session->priv;
 	bool active;

-	j1939_session_list_lock(session->priv);
+	j1939_session_list_lock(priv);
+	/* This function should be called with a session ref-count of at
+	 * least 2.
+	 */
+	WARN_ON_ONCE(kref_read(&session->kref) < 2);
 	active = j1939_session_deactivate_locked(session);
-	j1939_session_list_unlock(session->priv);
+	j1939_session_list_unlock(priv);

 	return active;
 }
@@ -1869,7 +1874,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
 		if (!session->transmission)
 			j1939_tp_schedule_txtimer(session, 0);
 	} else {
-		j1939_tp_set_rxtimeout(session, 250);
+		j1939_tp_set_rxtimeout(session, 750);
 	}
 	session->last_cmd = 0xff;
 	consume_skb(se_skb);
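The WARN_ON_ONCE() added above encodes a lifetime rule rather than fixing a crash by itself: a function that may drop the owning reference has to be entered with at least one additional reference held by the caller, otherwise the object can be freed underneath it. A generic sketch of that rule (not j1939 code):

/* Hedged sketch: the "ref-count of at least 2" rule. */
#include <linux/kref.h>
#include <linux/slab.h>

struct obj { struct kref kref; };

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, kref));
}

static void obj_deactivate(struct obj *o)
{
	/* one reference belongs to the owner (e.g. an active list),
	 * the other must be held by our caller */
	WARN_ON_ONCE(kref_read(&o->kref) < 2);
	kref_put(&o->kref, obj_release);	/* drop the owner's reference */
	/* 'o' stays valid here only because the caller still holds one */
}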
@ -546,10 +546,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
|
|||
return -EFAULT;
|
||||
}
|
||||
|
||||
rtnl_lock();
|
||||
lock_sock(sk);
|
||||
|
||||
if (ro->bound && ro->ifindex)
|
||||
if (ro->bound && ro->ifindex) {
|
||||
dev = dev_get_by_index(sock_net(sk), ro->ifindex);
|
||||
if (!dev) {
|
||||
if (count > 1)
|
||||
kfree(filter);
|
||||
err = -ENODEV;
|
||||
goto out_fil;
|
||||
}
|
||||
}
|
||||
|
||||
if (ro->bound) {
|
||||
/* (try to) register the new filters */
|
||||
|
@ -588,6 +596,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
|
|||
dev_put(dev);
|
||||
|
||||
release_sock(sk);
|
||||
rtnl_unlock();
|
||||
|
||||
break;
|
||||
|
||||
|
@ -600,10 +609,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
|
|||
|
||||
err_mask &= CAN_ERR_MASK;
|
||||
|
||||
rtnl_lock();
|
||||
lock_sock(sk);
|
||||
|
||||
if (ro->bound && ro->ifindex)
|
||||
if (ro->bound && ro->ifindex) {
|
||||
dev = dev_get_by_index(sock_net(sk), ro->ifindex);
|
||||
if (!dev) {
|
||||
err = -ENODEV;
|
||||
goto out_err;
|
||||
}
|
||||
}
|
||||
|
||||
/* remove current error mask */
|
||||
if (ro->bound) {
|
||||
|
@ -627,6 +642,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
|
|||
dev_put(dev);
|
||||
|
||||
release_sock(sk);
|
||||
rtnl_unlock();
|
||||
|
||||
break;
|
||||
|
||||
|
|
|
@ -9328,18 +9328,10 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
|
|||
|
||||
switch (attrs->flavour) {
|
||||
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
|
||||
case DEVLINK_PORT_FLAVOUR_VIRTUAL:
|
||||
n = snprintf(name, len, "p%u", attrs->phys.port_number);
|
||||
if (n < len && attrs->split)
|
||||
n += snprintf(name + n, len - n, "s%u",
|
||||
attrs->phys.split_subport_number);
|
||||
if (!attrs->split)
|
||||
n = snprintf(name, len, "p%u", attrs->phys.port_number);
|
||||
else
|
||||
n = snprintf(name, len, "p%us%u",
|
||||
attrs->phys.port_number,
|
||||
attrs->phys.split_subport_number);
|
||||
|
||||
break;
|
||||
case DEVLINK_PORT_FLAVOUR_CPU:
|
||||
case DEVLINK_PORT_FLAVOUR_DSA:
|
||||
|
@ -9381,6 +9373,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
|
|||
n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
|
||||
attrs->pci_sf.sf);
|
||||
break;
|
||||
case DEVLINK_PORT_FLAVOUR_VIRTUAL:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (n >= len)
|
||||
|
|
|
@@ -1504,7 +1504,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow)
 }
 EXPORT_SYMBOL(flow_get_u32_dst);

-/* Sort the source and destination IP (and the ports if the IP are the same),
+/* Sort the source and destination IP and the ports,
  * to have consistent hash within the two directions
  */
 static inline void __flow_hash_consistentify(struct flow_keys *keys)
@@ -1515,11 +1515,11 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
 	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
 		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
 			    (__force u32)keys->addrs.v4addrs.src;
-		if ((addr_diff < 0) ||
-		    (addr_diff == 0 &&
-		     ((__force u16)keys->ports.dst <
-		      (__force u16)keys->ports.src))) {
+		if (addr_diff < 0)
 			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+
+		if ((__force u16)keys->ports.dst <
+		    (__force u16)keys->ports.src) {
 			swap(keys->ports.src, keys->ports.dst);
 		}
 		break;
@@ -1527,13 +1527,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
 		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
 				   &keys->addrs.v6addrs.src,
 				   sizeof(keys->addrs.v6addrs.dst));
-		if ((addr_diff < 0) ||
-		    (addr_diff == 0 &&
-		     ((__force u16)keys->ports.dst <
-		      (__force u16)keys->ports.src))) {
+		if (addr_diff < 0) {
 			for (i = 0; i < 4; i++)
 				swap(keys->addrs.v6addrs.src.s6_addr32[i],
 				     keys->addrs.v6addrs.dst.s6_addr32[i]);
+		}
+		if ((__force u16)keys->ports.dst <
+		    (__force u16)keys->ports.src) {
 			swap(keys->ports.src, keys->ports.dst);
 		}
 		break;
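The net effect of the three hunks above is that the ports are now sorted independently of the addresses, so a flow hashes identically in both directions even when only the ports differ. A stand-alone illustration of the canonicalization (user-space sketch, not kernel code):

/* Hedged sketch: canonicalize a 4-tuple so A->B and B->A produce the
 * same hash input. */
#include <stdint.h>
#include <stdio.h>

struct tuple { uint32_t saddr, daddr; uint16_t sport, dport; };

static void consistentify(struct tuple *t)
{
	uint32_t a;
	uint16_t p;

	if (t->daddr < t->saddr) {
		a = t->saddr; t->saddr = t->daddr; t->daddr = a;
	}
	/* ports are sorted unconditionally, not only when the
	 * addresses happen to be equal */
	if (t->dport < t->sport) {
		p = t->sport; t->sport = t->dport; t->dport = p;
	}
}

int main(void)
{
	struct tuple fwd = { 0x0a000001, 0x0a000002, 443, 51000 };
	struct tuple rev = { 0x0a000002, 0x0a000001, 51000, 443 };

	consistentify(&fwd);
	consistentify(&rev);
	printf("same input for both directions: %d\n",
	       fwd.saddr == rev.saddr && fwd.dport == rev.dport);
	return 0;
}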
@ -584,29 +584,42 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
|
|||
return sk_psock_skb_ingress(psock, skb);
|
||||
}
|
||||
|
||||
static void sock_drop(struct sock *sk, struct sk_buff *skb)
|
||||
static void sk_psock_skb_state(struct sk_psock *psock,
|
||||
struct sk_psock_work_state *state,
|
||||
struct sk_buff *skb,
|
||||
int len, int off)
|
||||
{
|
||||
sk_drops_add(sk, skb);
|
||||
kfree_skb(skb);
|
||||
spin_lock_bh(&psock->ingress_lock);
|
||||
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
|
||||
state->skb = skb;
|
||||
state->len = len;
|
||||
state->off = off;
|
||||
} else {
|
||||
sock_drop(psock->sk, skb);
|
||||
}
|
||||
spin_unlock_bh(&psock->ingress_lock);
|
||||
}
|
||||
|
||||
static void sk_psock_backlog(struct work_struct *work)
|
||||
{
|
||||
struct sk_psock *psock = container_of(work, struct sk_psock, work);
|
||||
struct sk_psock_work_state *state = &psock->work_state;
|
||||
struct sk_buff *skb;
|
||||
struct sk_buff *skb = NULL;
|
||||
bool ingress;
|
||||
u32 len, off;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&psock->work_mutex);
|
||||
if (state->skb) {
|
||||
if (unlikely(state->skb)) {
|
||||
spin_lock_bh(&psock->ingress_lock);
|
||||
skb = state->skb;
|
||||
len = state->len;
|
||||
off = state->off;
|
||||
state->skb = NULL;
|
||||
goto start;
|
||||
spin_unlock_bh(&psock->ingress_lock);
|
||||
}
|
||||
if (skb)
|
||||
goto start;
|
||||
|
||||
while ((skb = skb_dequeue(&psock->ingress_skb))) {
|
||||
len = skb->len;
|
||||
|
@ -621,9 +634,8 @@ static void sk_psock_backlog(struct work_struct *work)
|
|||
len, ingress);
|
||||
if (ret <= 0) {
|
||||
if (ret == -EAGAIN) {
|
||||
state->skb = skb;
|
||||
state->len = len;
|
||||
state->off = off;
|
||||
sk_psock_skb_state(psock, state, skb,
|
||||
len, off);
|
||||
goto end;
|
||||
}
|
||||
/* Hard errors break pipe and stop xmit. */
|
||||
|
@ -722,6 +734,11 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
|
|||
skb_bpf_redirect_clear(skb);
|
||||
sock_drop(psock->sk, skb);
|
||||
}
|
||||
kfree_skb(psock->work_state.skb);
|
||||
/* We null the skb here to ensure that calls to sk_psock_backlog
|
||||
* do not pick up the free'd skb.
|
||||
*/
|
||||
psock->work_state.skb = NULL;
|
||||
__sk_psock_purge_ingress_msg(psock);
|
||||
}
|
||||
|
||||
|
@ -773,8 +790,6 @@ static void sk_psock_destroy(struct work_struct *work)
|
|||
|
||||
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
|
||||
{
|
||||
sk_psock_stop(psock, false);
|
||||
|
||||
write_lock_bh(&sk->sk_callback_lock);
|
||||
sk_psock_restore_proto(sk, psock);
|
||||
rcu_assign_sk_user_data(sk, NULL);
|
||||
|
@ -784,6 +799,8 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
|
|||
sk_psock_stop_verdict(sk, psock);
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
|
||||
sk_psock_stop(psock, false);
|
||||
|
||||
INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
|
||||
queue_rcu_work(system_wq, &psock->rwork);
|
||||
}
|
||||
|
|
|
@@ -390,7 +390,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
 	}

-	skb_reset_network_header(skb);
+	skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);

 	err = IP_ECN_decapsulate(iph, skb);
 	if (unlikely(err)) {
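The one-line change above matters because IP_ECN_decapsulate() compares the outer TOS with the inner IP header, and for Ethernet-type tunnels (e.g. gretap) the inner IP header starts ETH_HLEN bytes into the decapsulated payload rather than at offset 0. A tiny stand-alone illustration of the offset being selected:

/* Hedged sketch: where the inner IP header begins for the two tunnel
 * flavours distinguished above. */
#include <stdio.h>

#define ETH_HLEN 14

static int inner_ip_offset(int dev_is_ether)
{
	/* an L2 tunnel carries an inner Ethernet header first */
	return dev_is_ether ? ETH_HLEN : 0;
}

int main(void)
{
	printf("gre:    inner IP at offset %d\n", inner_ip_offset(0));
	printf("gretap: inner IP at offset %d\n", inner_ip_offset(1));
	return 0;
}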
@@ -549,9 +549,10 @@ int ip6_forward(struct sk_buff *skb)
 	if (net->ipv6.devconf_all->proxy_ndp &&
 	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
 		int proxied = ip6_forward_proxy_check(skb);
-		if (proxied > 0)
+		if (proxied > 0) {
+			hdr->hop_limit--;
 			return ip6_input(skb);
-		else if (proxied < 0) {
+		} else if (proxied < 0) {
 			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
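The added hdr->hop_limit-- keeps NDP-proxied traffic consistent with the normal forwarding path: the packet is handed back to local input, but it still consumed a hop. A minimal sketch of the rule (illustrative, not the kernel path):

/* Hedged sketch: a proxied packet is delivered via local input but
 * must still be charged one hop, like any forwarded packet. */
struct ip6hdr_lite { unsigned char hop_limit; };	/* illustrative */

static int forward_proxied(struct ip6hdr_lite *hdr)
{
	if (hdr->hop_limit <= 1)
		return -1;	/* expired: caller should emit Time Exceeded */
	hdr->hop_limit--;	/* consumed one hop on the forwarding path */
	return 0;		/* hand the packet to local input */
}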
@@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
 {
 	u8 rc = LLC_PDU_LEN_U;

-	if (addr->sllc_test || addr->sllc_xid)
+	if (addr->sllc_test)
 		rc = LLC_PDU_LEN_U;
+	else if (addr->sllc_xid)
+		/* We need to expand header to sizeof(struct llc_xid_info)
+		 * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header
+		 * as XID PDU. In llc_ui_sendmsg() we reserved header size and then
+		 * filled all other space with user data. If we won't reserve this
+		 * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data
+		 */
+		rc = LLC_PDU_LEN_U_XID;
 	else if (sk->sk_type == SOCK_STREAM)
 		rc = LLC_PDU_LEN_I;
 	return rc;
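The arithmetic behind the new LLC_PDU_LEN_U_XID value is small but worth spelling out: the U-format header is 3 bytes and the XID info block is another 3, so an XID frame needs 6 bytes of reserved headroom; reserving only 3 lets the XID bytes 4..6 be written over whatever follows the header, i.e. the user data. A self-contained illustration (struct sizes mirror the kernel definitions, the numbers are what matter):

/* Hedged illustration of the LLC XID header-length fix. */
#include <stdio.h>

struct llc_xid_info { unsigned char fmt_id, type, rw; };	/* 3 bytes */

#define LLC_PDU_LEN_U      3	/* dsap, ssap, 1 control byte */
#define LLC_PDU_LEN_U_XID  (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))

int main(void)
{
	/* Reserving only LLC_PDU_LEN_U (3) for an XID command leaves no
	 * room for the XID info bytes 4..6, so filling them in later
	 * overwrites the user data placed right after the header. */
	printf("XID frame needs %zu reserved header bytes, not %d\n",
	       (size_t)LLC_PDU_LEN_U_XID, LLC_PDU_LEN_U);
	return 0;
}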
@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
 	int rc;

-	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+	llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
 			    ev->daddr.lsap, LLC_PDU_CMD);
 	llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
 	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
@@ -152,6 +152,8 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
				   struct vif_params *params)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
	int ret;

	ret = ieee80211_if_change_type(sdata, type);
@@ -162,7 +164,24 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
		ieee80211_check_fast_rx_iface(sdata);
	} else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) {
+		struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+		if (params->use_4addr == ifmgd->use_4addr)
+			return 0;
+
		sdata->u.mgd.use_4addr = params->use_4addr;
+		if (!ifmgd->associated)
+			return 0;
+
+		mutex_lock(&local->sta_mtx);
+		sta = sta_info_get(sdata, ifmgd->bssid);
+		if (sta)
+			drv_sta_set_4addr(local, sdata, &sta->sta,
+					  params->use_4addr);
+		mutex_unlock(&local->sta_mtx);
+
+		if (params->use_4addr)
+			ieee80211_send_4addr_nullfunc(local, sdata);
	}

	if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {

@@ -2201,6 +2201,8 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t);
void ieee80211_send_nullfunc(struct ieee80211_local *local,
			     struct ieee80211_sub_if_data *sdata,
			     bool powersave);
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+				   struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_hdr *hdr, bool ack, u16 tx_time);

@@ -1095,8 +1095,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
	ieee80211_tx_skb(sdata, skb);
}

-static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
-					   struct ieee80211_sub_if_data *sdata)
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+				   struct ieee80211_sub_if_data *sdata)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *nullfunc;

@@ -730,7 +730,8 @@ ieee80211_make_monitor_skb(struct ieee80211_local *local,
	 * Need to make a copy and possibly remove radiotap header
	 * and FCS from the original.
	 */
-	skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
+	skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD,
+			      0, GFP_ATOMIC);

	if (!skb)
		return NULL;

@@ -1147,6 +1147,29 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
	return queued;
}

+static void
+ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
+		     struct sta_info *sta,
+		     struct sk_buff *skb)
+{
+	struct rate_control_ref *ref = sdata->local->rate_ctrl;
+	u16 tid;
+
+	if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
+		return;
+
+	if (!sta || !sta->sta.ht_cap.ht_supported ||
+	    !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
+	    skb->protocol == sdata->control_port_protocol)
+		return;
+
+	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+	if (likely(sta->ampdu_mlme.tid_tx[tid]))
+		return;
+
+	ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
+}
+
/*
 * initialises @tx
 * pass %NULL for the station if unknown, a valid pointer if known
@@ -1160,6 +1183,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	bool aggr_check = false;
	int tid;

	memset(tx, 0, sizeof(*tx));
@@ -1188,8 +1212,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
		} else if (tx->sdata->control_port_protocol == tx->skb->protocol) {
			tx->sta = sta_info_get_bss(sdata, hdr->addr1);
		}
-		if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
+		if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) {
			tx->sta = sta_info_get(sdata, hdr->addr1);
+			aggr_check = true;
+		}
	}

	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
@@ -1199,8 +1225,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
		struct tid_ampdu_tx *tid_tx;

		tid = ieee80211_get_tid(hdr);

		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+		if (!tid_tx && aggr_check) {
+			ieee80211_aggr_check(sdata, tx->sta, skb);
+			tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+		}

		if (tid_tx) {
			bool queued;

@@ -4120,29 +4150,6 @@ void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);

-static void
-ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
-		     struct sta_info *sta,
-		     struct sk_buff *skb)
-{
-	struct rate_control_ref *ref = sdata->local->rate_ctrl;
-	u16 tid;
-
-	if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
-		return;
-
-	if (!sta || !sta->sta.ht_cap.ht_supported ||
-	    !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
-	    skb->protocol == sdata->control_port_protocol)
-		return;
-
-	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-	if (likely(sta->ampdu_mlme.tid_tx[tid]))
-		return;
-
-	ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
-}
-
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
				  struct net_device *dev,
				  u32 info_flags,

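The hunks above move the aggregation trigger into the common ieee80211_tx_prepare() path, so the first data frame of a traffic class can start a block-ack session even when it does not go through the fast-xmit path (the mesh case named in the pull summary). A rough userspace sketch of the "check once per TID and start lazily" idea (hypothetical names and types, not mac80211 code):

#include <stdbool.h>
#include <stdio.h>

#define NUM_TIDS 8    /* hypothetical number of traffic classes */

struct peer {
	bool session_active[NUM_TIDS];  /* stands in for the tid_tx[] state */
};

/* Hypothetical trigger: called on the common transmit-prepare path so that
 * every first frame of a TID can kick off a session, not only frames that
 * happen to take one particular fast path. */
static void aggr_check(struct peer *p, int tid)
{
	if (p->session_active[tid])
		return;                /* already running: nothing to do */
	p->session_active[tid] = true; /* the kernel would request a
					* block-ack session here instead */
	printf("starting aggregation session for TID %d\n", tid);
}

int main(void)
{
	struct peer sta = {0};
	int tids[] = {0, 5, 0, 5, 3};

	for (unsigned i = 0; i < sizeof(tids) / sizeof(tids[0]); i++)
		aggr_check(&sta, tids[i]);   /* fires once per TID */
	return 0;
}
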
@@ -670,8 +670,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
		return false;

	tstamp = nf_conn_tstamp_find(ct);
-	if (tstamp && tstamp->stop == 0)
+	if (tstamp) {
+		s32 timeout = ct->timeout - nfct_time_stamp;
+
		tstamp->stop = ktime_get_real_ns();
+		if (timeout < 0)
+			tstamp->stop -= jiffies_to_nsecs(-timeout);
+	}

	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
				      portid, report) < 0) {

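The conntrack change above back-dates tstamp->stop when the entry's timeout had already run out, instead of stamping the moment the entry is finally torn down. A small standalone sketch of the same arithmetic (HZ and the timeout value are invented; CLOCK_REALTIME stands in for ktime_get_real_ns()):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define HZ 100                        /* hypothetical ticks per second */

static uint64_t jiffies_to_nsecs(int64_t j)
{
	return (uint64_t)j * (1000000000ull / HZ);
}

int main(void)
{
	struct timespec now;
	int32_t timeout = -250;           /* ticks: negative => already expired */
	uint64_t stop_ns;

	clock_gettime(CLOCK_REALTIME, &now);
	stop_ns = (uint64_t)now.tv_sec * 1000000000ull + now.tv_nsec;

	/* If the entry expired some time ago, record the stop time as the
	 * moment it expired rather than "now", by subtracting the overdue
	 * interval converted to nanoseconds. */
	if (timeout < 0)
		stop_ns -= jiffies_to_nsecs(-timeout);

	printf("stop timestamp backdated by %llu ns\n",
	       (unsigned long long)jiffies_to_nsecs(-timeout));
	printf("stop_ns = %llu\n", (unsigned long long)stop_ns);
	return 0;
}
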
@@ -331,7 +331,11 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
-	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+	u32 timeout;
+
+	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+	if (READ_ONCE(flow->timeout) != timeout)
+		WRITE_ONCE(flow->timeout, timeout);

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

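The refresh path above now only writes flow->timeout when the computed value actually differs, with READ_ONCE/WRITE_ONCE marking the racy accesses. A userspace analogue using relaxed C11 atomics (illustrative only; the field and function names are invented):

#include <stdatomic.h>
#include <stdio.h>

/* Read the shared field once, and only write it back when the value
 * changed, so a hot path does not keep dirtying the cache line. The
 * relaxed atomics play the role of the READ_ONCE/WRITE_ONCE annotations. */
static _Atomic unsigned int flow_timeout;

static void refresh(unsigned int now, unsigned int period)
{
	unsigned int timeout = now + period;

	if (atomic_load_explicit(&flow_timeout, memory_order_relaxed) != timeout)
		atomic_store_explicit(&flow_timeout, timeout, memory_order_relaxed);
}

int main(void)
{
	refresh(1000, 30);   /* first packet of the tick: one store */
	refresh(1000, 30);   /* later packets in the same tick: no store */
	printf("timeout = %u\n",
	       atomic_load_explicit(&flow_timeout, memory_order_relaxed));
	return 0;
}
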
@@ -8445,6 +8445,16 @@ static int nf_tables_commit_audit_alloc(struct list_head *adl,
	return 0;
}

+static void nf_tables_commit_audit_free(struct list_head *adl)
+{
+	struct nft_audit_data *adp, *adn;
+
+	list_for_each_entry_safe(adp, adn, adl, list) {
+		list_del(&adp->list);
+		kfree(adp);
+	}
+}
+
static void nf_tables_commit_audit_collect(struct list_head *adl,
					   struct nft_table *table, u32 op)
{
@@ -8509,6 +8519,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
		ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table);
		if (ret) {
			nf_tables_commit_chain_prepare_cancel(net);
+			nf_tables_commit_audit_free(&adl);
			return ret;
		}
		if (trans->msg_type == NFT_MSG_NEWRULE ||
@@ -8518,6 +8529,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
			ret = nf_tables_commit_chain_prepare(net, chain);
			if (ret < 0) {
				nf_tables_commit_chain_prepare_cancel(net);
+				nf_tables_commit_audit_free(&adl);
				return ret;
			}
		}

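The new nf_tables_commit_audit_free() above releases the audit entries collected so far when commit preparation fails partway through. A self-contained sketch of the same error-path cleanup on a plain linked list (hypothetical structure, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

/* Entries are collected on a list while preparing a transaction; if a
 * later preparation step fails, the error path must walk the list with a
 * "safe" iterator (saving the next pointer before freeing) and release
 * every entry, or they leak. */
struct audit_data {
	int table_id;
	struct audit_data *next;
};

static void audit_free(struct audit_data **head)
{
	struct audit_data *cur = *head, *next;

	while (cur) {
		next = cur->next;   /* save before freeing, like *_safe iterators */
		free(cur);
		cur = next;
	}
	*head = NULL;
}

int main(void)
{
	struct audit_data *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct audit_data *e = malloc(sizeof(*e));

		if (!e)
			break;
		e->table_id = i;
		e->next = head;
		head = e;
	}

	/* pretend a later commit-preparation step failed here */
	audit_free(&head);
	printf("pending audit entries released: %s\n", head ? "no" : "yes");
	return 0;
}
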
@@ -174,7 +174,9 @@ static const struct nf_hook_entries *
nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev)
{
	const struct nf_hook_entries *hook_head = NULL;
+#ifdef CONFIG_NETFILTER_INGRESS
	struct net_device *netdev;
+#endif

	switch (pf) {
	case NFPROTO_IPV4:

@@ -48,24 +48,30 @@ static void nft_last_eval(const struct nft_expr *expr,
{
	struct nft_last_priv *priv = nft_expr_priv(expr);

-	priv->last_jiffies = jiffies;
-	priv->last_set = 1;
+	if (READ_ONCE(priv->last_jiffies) != jiffies)
+		WRITE_ONCE(priv->last_jiffies, jiffies);
+	if (READ_ONCE(priv->last_set) == 0)
+		WRITE_ONCE(priv->last_set, 1);
}

static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	struct nft_last_priv *priv = nft_expr_priv(expr);
+	unsigned long last_jiffies = READ_ONCE(priv->last_jiffies);
+	u32 last_set = READ_ONCE(priv->last_set);
	__be64 msecs;

-	if (time_before(jiffies, priv->last_jiffies))
-		priv->last_set = 0;
+	if (time_before(jiffies, last_jiffies)) {
+		WRITE_ONCE(priv->last_set, 0);
+		last_set = 0;
+	}

-	if (priv->last_set)
-		msecs = nf_jiffies64_to_msecs(jiffies - priv->last_jiffies);
+	if (last_set)
+		msecs = nf_jiffies64_to_msecs(jiffies - last_jiffies);
	else
		msecs = 0;

-	if (nla_put_be32(skb, NFTA_LAST_SET, htonl(priv->last_set)) ||
+	if (nla_put_be32(skb, NFTA_LAST_SET, htonl(last_set)) ||
	    nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD))
		goto nla_put_failure;

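nft_last_dump() above snapshots last_jiffies and last_set into locals once and then works only on the snapshot, so a packet updating the expression concurrently cannot make the dumped values disagree with each other. A userspace analogue (relaxed C11 atomics standing in for READ_ONCE/WRITE_ONCE; names invented):

#include <stdatomic.h>
#include <stdio.h>

/* The packet path updates 'last_jiffies'/'last_set' concurrently with the
 * dump path. The dump therefore takes one snapshot of each field and uses
 * only the snapshot afterwards, so the values it prints are consistent. */
static _Atomic unsigned long last_jiffies;
static _Atomic unsigned int last_set;

static void eval(unsigned long now)
{
	if (atomic_load_explicit(&last_jiffies, memory_order_relaxed) != now)
		atomic_store_explicit(&last_jiffies, now, memory_order_relaxed);
	if (atomic_load_explicit(&last_set, memory_order_relaxed) == 0)
		atomic_store_explicit(&last_set, 1, memory_order_relaxed);
}

static void dump(unsigned long now)
{
	unsigned long jif = atomic_load_explicit(&last_jiffies, memory_order_relaxed);
	unsigned int set = atomic_load_explicit(&last_set, memory_order_relaxed);

	if (set)
		printf("last seen %lu ticks ago\n", now - jif);
	else
		printf("never seen\n");
}

int main(void)
{
	eval(100);
	dump(130);
	return 0;
}
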
@@ -201,7 +201,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
		alen = sizeof_field(struct nf_nat_range, min_addr.ip6);
		break;
	default:
-		return -EAFNOSUPPORT;
+		if (tb[NFTA_NAT_REG_ADDR_MIN])
+			return -EAFNOSUPPORT;
+		break;
	}
	priv->family = family;

@@ -518,8 +518,10 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
	if (!ipc)
		goto err;

-	if (sock_queue_rcv_skb(&ipc->sk, skb))
+	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+		qrtr_port_put(ipc);
		goto err;
+	}

	qrtr_port_put(ipc);
}
@@ -839,6 +841,8 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,

	ipc = qrtr_port_lookup(to->sq_port);
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+		if (ipc)
+			qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENODEV;
	}

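Both qrtr hunks above add the missing reference drop on paths that bail out after qrtr_port_lookup() has taken a reference. A minimal refcounting sketch of the rule being enforced (hypothetical types and names, not the qrtr API):

#include <stdio.h>
#include <stdlib.h>

/* lookup() returns an object with its count raised, so every return path
 * afterwards must drop that count exactly once, including the failure
 * branches. */
struct port {
	int refcnt;
};

static struct port *port_lookup(struct port *p)
{
	p->refcnt++;
	return p;
}

static void port_put(struct port *p)
{
	p->refcnt--;
}

static int deliver(struct port *p, int queue_full)
{
	struct port *ipc = port_lookup(p);

	if (queue_full) {
		port_put(ipc);      /* the fix: drop the reference on error too */
		return -1;
	}
	/* ... queue the message ... */
	port_put(ipc);
	return 0;
}

int main(void)
{
	struct port p = { .refcnt = 1 };

	deliver(&p, 1);
	deliver(&p, 0);
	printf("refcount back to %d (expected 1)\n", p.refcnt);
	return 0;
}
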
@@ -1203,7 +1203,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
	if (unlikely(!af))
		return NULL;

-	if (af->from_addr_param(&paddr, param, peer_port, 0))
+	if (!af->from_addr_param(&paddr, param, peer_port, 0))
		return NULL;

	return __sctp_lookup_association(net, laddr, &paddr, transportp);

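The one-character change above matters because af->from_addr_param() returns true on success, so the lookup must bail out when the call fails, not when it succeeds. A tiny sketch of that calling convention (the parser here is invented, not the SCTP helper):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical parser with the same convention as the helper in the hunk:
 * true on success, false on a malformed parameter. The bug class being
 * fixed is checking such a helper as if non-zero meant error. */
static bool parse_addr(const char *param, unsigned int *addr)
{
	if (strlen(param) != 4)
		return false;           /* malformed */
	memcpy(addr, param, 4);
	return true;                    /* success */
}

static int lookup(const char *param)
{
	unsigned int addr;

	if (!parse_addr(param, &addr))  /* correct: bail out on failure */
		return -1;
	return 0;
}

int main(void)
{
	printf("good param -> %d (expected 0)\n", lookup("abcd"));
	printf("bad  param -> %d (expected -1)\n", lookup("ab"));
	return 0;
}
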
@@ -100,8 +100,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
	list_for_each_entry_safe(addr, temp,
				 &net->sctp.local_addr_list, list) {
		if (addr->a.sa.sa_family == AF_INET6 &&
-		    ipv6_addr_equal(&addr->a.v6.sin6_addr,
-				    &ifa->addr)) {
+		    ipv6_addr_equal(&addr->a.v6.sin6_addr,
+				    &ifa->addr) &&
+		    addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) {
			sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
			found = 1;
			addr->valid = 0;

@@ -1109,12 +1109,12 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net,
	if (!sctp_transport_pl_enabled(transport))
		return SCTP_DISPOSITION_CONSUME;

-	sctp_transport_pl_send(transport);
-
-	reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
-	if (!reply)
-		return SCTP_DISPOSITION_NOMEM;
-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+	if (sctp_transport_pl_send(transport)) {
+		reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+		if (!reply)
+			return SCTP_DISPOSITION_NOMEM;
+		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+	}
	sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
			SCTP_TRANSPORT(transport));

@@ -1274,8 +1274,7 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
	    !sctp_transport_pl_enabled(link))
		return SCTP_DISPOSITION_DISCARD;

-	sctp_transport_pl_recv(link);
-	if (link->pl.state == SCTP_PL_COMPLETE)
+	if (sctp_transport_pl_recv(link))
		return SCTP_DISPOSITION_CONSUME;

	return sctp_sf_send_probe(net, ep, asoc, type, link, commands);

@@ -258,16 +258,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
	sctp_transport_pl_update(transport);
}

-void sctp_transport_pl_send(struct sctp_transport *t)
+bool sctp_transport_pl_send(struct sctp_transport *t)
{
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);

-	if (t->pl.probe_count < SCTP_MAX_PROBES) {
-		t->pl.probe_count++;
-		return;
-	}
+	if (t->pl.probe_count < SCTP_MAX_PROBES)
+		goto out;

	t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
	t->pl.probe_count = 0;
	if (t->pl.state == SCTP_PL_BASE) {
		if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
			t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
@@ -299,14 +296,27 @@ void sctp_transport_pl_send(struct sctp_transport *t)
			sctp_assoc_sync_pmtu(t->asoc);
		}
	}
-	t->pl.probe_count = 1;
+
+out:
+	if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
+	    !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) {
+		t->pl.raise_count++;
+		return false;
+	}
+
+	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+	t->pl.probe_count++;
+	return true;
}

-void sctp_transport_pl_recv(struct sctp_transport *t)
+bool sctp_transport_pl_recv(struct sctp_transport *t)
{
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);

	t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
	t->pl.pmtu = t->pl.probe_size;
	t->pl.probe_count = 0;
	if (t->pl.state == SCTP_PL_BASE) {
@@ -323,7 +333,7 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
		if (!t->pl.probe_high) {
			t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
					       SCTP_MAX_PLPMTU);
-			return;
+			return false;
		}
		t->pl.probe_size += SCTP_PL_MIN_STEP;
		if (t->pl.probe_size >= t->pl.probe_high) {
@@ -335,14 +345,13 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		}
-	} else if (t->pl.state == SCTP_PL_COMPLETE) {
-		t->pl.raise_count++;
-		if (t->pl.raise_count == 30) {
-			/* Raise probe_size again after 30 * interval in Search Complete */
-			t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
-			t->pl.probe_size += SCTP_PL_MIN_STEP;
-		}
+	} else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
+		/* Raise probe_size again after 30 * interval in Search Complete */
+		t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
+		t->pl.probe_size += SCTP_PL_MIN_STEP;
	}
+
+	return t->pl.state == SCTP_PL_COMPLETE;
}

static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)

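After the change above, sctp_transport_pl_send() and sctp_transport_pl_recv() return a bool so the state-machine callers only build probes or consume heartbeats when the probe logic says so. A much-simplified sketch of that "helper decides, caller acts" shape (the 30-quiet-interval raise rule is mirrored loosely; everything here is illustrative, not the SCTP implementation):

#include <stdbool.h>
#include <stdio.h>

/* Compact sketch of the calling convention introduced above: the probe
 * helper updates its own counters and returns whether the caller should
 * actually build and send a probe this round. */
struct probe_state {
	int raise_count;
	bool complete;        /* stands in for the Search Complete state */
};

static bool pl_send(struct probe_state *st)
{
	if (st->complete && st->raise_count < 30) {
		st->raise_count++;   /* quiet interval: skip the probe */
		return false;
	}
	return true;                 /* caller should transmit a probe */
}

int main(void)
{
	struct probe_state st = { .raise_count = 28, .complete = true };

	for (int i = 0; i < 4; i++)
		printf("interval %d: %s\n", i,
		       pl_send(&st) ? "send probe" : "skip probe");
	return 0;
}
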
@@ -898,16 +898,10 @@ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
	if (unlikely(!aead))
		return -ENOKEY;

-	/* Cow skb data if needed */
-	if (likely(!skb_cloned(skb) &&
-		   (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
-		nsg = 1 + skb_shinfo(skb)->nr_frags;
-	} else {
-		nsg = skb_cow_data(skb, 0, &unused);
-		if (unlikely(nsg < 0)) {
-			pr_err("RX: skb_cow_data() returned %d\n", nsg);
-			return nsg;
-		}
+	nsg = skb_cow_data(skb, 0, &unused);
+	if (unlikely(nsg < 0)) {
+		pr_err("RX: skb_cow_data() returned %d\n", nsg);
+		return nsg;
	}

	/* Allocate memory for the AEAD operation */

Some files were not shown because too many files have changed in this diff.