Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "I've been traveling so this accumulates more than week or so of bug
  fixing.  It perhaps looks a little worse than it really is.

   1) Fix deadlock in ath10k driver, from Ben Greear.

   2) Increase scan timeout in iwlwifi, from Luca Coelho.

   3) Unbreak STP by properly reinjecting STP packets back into the
      stack.  Regression fix from Ido Schimmel.

   4) Mediatek driver fixes (missing malloc failure checks, leaking of
      scratch memory, wrong indexing when mapping TX buffers, etc.) from
      John Crispin.

   5) Fix endianness bug in icmpv6_err() handler, from Hannes Frederic
      Sowa.

   6) Fix hashing of flows in UDP in the reuseport case, from Xuemin Su.

   7) Fix netlink notifications in ovs for tunnels; delete link messages
      were never emitted because of how the device registry state is
      handled.  From Nicolas Dichtel.

   8) Conntrack module leaks kmemcache on unload, from Florian Westphal.

   9) Prevent endless jump loops in nft rules, from Liping Zhang and
      Pablo Neira Ayuso.

  10) Spinlock in mlx4 not initialized early enough, from Eric Dumazet.

  11) Bind refcount leak in act_ipt, from Cong WANG.

  12) Missing RCU locking in HTB scheduler, from Florian Westphal.

  13) Several small MACsec bug fixes from Sabrina Dubroca (missing RCU
      barrier, using heap for SG and IV, and erroneous use of async flag
      when allocating AEAD context).

  14) RCU handling fix in TIPC, from Ying Xue.

  15) Pass correct protocol down into ipv4_{update_pmtu,redirect}() in
      SIT driver, from Simon Horman.

  16) Socket timer deadlock fix in TIPC from Jon Paul Maloy.

  17) Fix potential deadlock in team enslave, from Ido Schimmel.

  18) Memory leak in KCM procfs handling, from Jiri Slaby.

  19) ESN generation fix in ipv4 ESP, from Herbert Xu.

  20) Fix GFP_KERNEL allocations with locks held in act_ife, from Cong
      WANG.

  21) Use after free in netem, from Eric Dumazet.

  22) Uninitialized last assert time in multicast router code, from Tom
      Goff.

  23) Skip raw sockets in sock_diag destruction broadcast, from Willem
      de Bruijn.

  24) Fix link status reporting in thunderx, from Sunil Goutham.

  25) Limit resegmentation of retransmit queue so that we do not
      retransmit too large GSO frames.  From Eric Dumazet.

  26) Delay bpf program release after grace period, from Daniel
      Borkmann"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (141 commits)
  openvswitch: fix conntrack netlink event delivery
  qed: Protect the doorbell BAR with the write barriers.
  neigh: Explicitly declare RCU-bh read side critical section in neigh_xmit()
  e1000e: keep VLAN interfaces functional after rxvlan off
  cfg80211: fix proto in ieee80211_data_to_8023 for frames without LLC header
  qlcnic: use the correct ring in qlcnic_83xx_process_rcv_ring_diag()
  bpf, perf: delay release of BPF prog after grace period
  net: bridge: fix vlan stats continue counter
  tcp: do not send too big packets at retransmit time
  ibmvnic: fix to use list_for_each_safe() when delete items
  net: thunderx: Fix TL4 configuration for secondary Qsets
  net: thunderx: Fix link status reporting
  net/mlx5e: Reorganize ethtool statistics
  net/mlx5e: Fix number of PFC counters reported to ethtool
  net/mlx5e: Prevent adding the same vxlan port
  net/mlx5e: Check for BlueFlame capability before allocating SQ uar
  net/mlx5e: Change enum to better reflect usage
  net/mlx5: Add ConnectX-5 PCIe 4.0 to list of supported devices
  net/mlx5: Update command strings
  net: marvell: Add separate config ANEG function for Marvell 88E1111
  ...
Committed by Linus Torvalds on 2016-06-29 11:50:42 -07:00
commit 32826ac41f
140 changed files with 1270 additions and 893 deletions


@ -7424,7 +7424,7 @@ F: drivers/scsi/megaraid.*
F: drivers/scsi/megaraid/
MELLANOX ETHERNET DRIVER (mlx4_en)
M: Eugenia Emantayev <eugenia@mellanox.com>
M: Tariq Toukan <tariqt@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com


@ -56,11 +56,21 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
static inline void get_seq(__u32 *ts, int *cpu)
static inline void send_msg(struct cn_msg *msg)
{
preempt_disable();
*ts = __this_cpu_inc_return(proc_event_counts) - 1;
*cpu = smp_processor_id();
msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
((struct proc_event *)msg->data)->cpu = smp_processor_id();
/*
* Preemption remains disabled during send to ensure the messages are
* ordered according to their sequence numbers.
*
* If cn_netlink_send() fails, the data is not sent.
*/
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
preempt_enable();
}
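Since the hunk above interleaves the removed get_seq() body with the added lines, here is a sketch of the new send_msg() helper as reconstructed from the diff (a best-effort reading, not necessarily verbatim upstream code):

    static inline void send_msg(struct cn_msg *msg)
    {
        preempt_disable();

        /* Fill in the per-CPU sequence number and CPU id, replacing get_seq() */
        msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
        ((struct proc_event *)msg->data)->cpu = smp_processor_id();

        /*
         * Preemption remains disabled during send to ensure the messages
         * are ordered according to their sequence numbers.
         *
         * If cn_netlink_send() fails, the data is not sent.
         */
        cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);

        preempt_enable();
    }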
@ -77,7 +87,6 @@ void proc_fork_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_FORK;
rcu_read_lock();
@ -92,8 +101,7 @@ void proc_fork_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
/* If cn_netlink_send() failed, the data is not sent */
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
send_msg(msg);
}
void proc_exec_connector(struct task_struct *task)
@ -108,7 +116,6 @@ void proc_exec_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
@ -118,7 +125,7 @@ void proc_exec_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
send_msg(msg);
}
void proc_id_connector(struct task_struct *task, int which_id)
@ -150,14 +157,13 @@ void proc_id_connector(struct task_struct *task, int which_id)
return;
}
rcu_read_unlock();
get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
send_msg(msg);
}
void proc_sid_connector(struct task_struct *task)
@ -172,7 +178,6 @@ void proc_sid_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_SID;
ev->event_data.sid.process_pid = task->pid;
@ -182,7 +187,7 @@ void proc_sid_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
send_msg(msg);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
@ -197,7 +202,6 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_PTRACE;
ev->event_data.ptrace.process_pid = task->pid;
@ -215,7 +219,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
send_msg(msg);
}
void proc_comm_connector(struct task_struct *task)
@ -230,7 +234,6 @@ void proc_comm_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COMM;
ev->event_data.comm.process_pid = task->pid;
@ -241,7 +244,7 @@ void proc_comm_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
send_msg(msg);
}
void proc_coredump_connector(struct task_struct *task)
@ -256,7 +259,6 @@ void proc_coredump_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
@ -266,7 +268,7 @@ void proc_coredump_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
send_msg(msg);
}
void proc_exit_connector(struct task_struct *task)
@ -281,7 +283,6 @@ void proc_exit_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
@ -293,7 +294,7 @@ void proc_exit_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
send_msg(msg);
}
/*
@ -325,7 +326,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
send_msg(msg);
}
/**


@ -657,6 +657,20 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
}
}
static int __agg_active_ports(struct aggregator *agg)
{
struct port *port;
int active = 0;
for (port = agg->lag_ports; port;
port = port->next_port_in_aggregator) {
if (port->is_enabled)
active++;
}
return active;
}
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
@ -664,39 +678,40 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
*/
static u32 __get_agg_bandwidth(struct aggregator *aggregator)
{
int nports = __agg_active_ports(aggregator);
u32 bandwidth = 0;
if (aggregator->num_of_ports) {
if (nports) {
switch (__get_link_speed(aggregator->lag_ports)) {
case AD_LINK_SPEED_1MBPS:
bandwidth = aggregator->num_of_ports;
bandwidth = nports;
break;
case AD_LINK_SPEED_10MBPS:
bandwidth = aggregator->num_of_ports * 10;
bandwidth = nports * 10;
break;
case AD_LINK_SPEED_100MBPS:
bandwidth = aggregator->num_of_ports * 100;
bandwidth = nports * 100;
break;
case AD_LINK_SPEED_1000MBPS:
bandwidth = aggregator->num_of_ports * 1000;
bandwidth = nports * 1000;
break;
case AD_LINK_SPEED_2500MBPS:
bandwidth = aggregator->num_of_ports * 2500;
bandwidth = nports * 2500;
break;
case AD_LINK_SPEED_10000MBPS:
bandwidth = aggregator->num_of_ports * 10000;
bandwidth = nports * 10000;
break;
case AD_LINK_SPEED_20000MBPS:
bandwidth = aggregator->num_of_ports * 20000;
bandwidth = nports * 20000;
break;
case AD_LINK_SPEED_40000MBPS:
bandwidth = aggregator->num_of_ports * 40000;
bandwidth = nports * 40000;
break;
case AD_LINK_SPEED_56000MBPS:
bandwidth = aggregator->num_of_ports * 56000;
bandwidth = nports * 56000;
break;
case AD_LINK_SPEED_100000MBPS:
bandwidth = aggregator->num_of_ports * 100000;
bandwidth = nports * 100000;
break;
default:
bandwidth = 0; /* to silence the compiler */
@ -1530,10 +1545,10 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
switch (__get_agg_selection_mode(curr->lag_ports)) {
case BOND_AD_COUNT:
if (curr->num_of_ports > best->num_of_ports)
if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
if (curr->num_of_ports < best->num_of_ports)
if (__agg_active_ports(curr) < __agg_active_ports(best))
return best;
/*FALLTHROUGH*/
@ -1561,8 +1576,14 @@ static int agg_device_up(const struct aggregator *agg)
if (!port)
return 0;
return netif_running(port->slave->dev) &&
netif_carrier_ok(port->slave->dev);
for (port = agg->lag_ports; port;
port = port->next_port_in_aggregator) {
if (netif_running(port->slave->dev) &&
netif_carrier_ok(port->slave->dev))
return 1;
}
return 0;
}
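The agg_device_up() hunk above also mixes old and new lines; reconstructed, the reworked helper checks every port in the aggregator rather than only the first one (the port declaration at the top is assumed from surrounding context):

    static int agg_device_up(const struct aggregator *agg)
    {
        struct port *port = agg->lag_ports;    /* assumed from context */

        if (!port)
            return 0;

        /* The aggregator counts as up if any of its ports has a running,
         * carrier-up slave device. */
        for (port = agg->lag_ports; port;
             port = port->next_port_in_aggregator) {
            if (netif_running(port->slave->dev) &&
                netif_carrier_ok(port->slave->dev))
                return 1;
        }

        return 0;
    }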
/**
@ -1610,7 +1631,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
agg->is_active = 0;
if (agg->num_of_ports && agg_device_up(agg))
if (__agg_active_ports(agg) && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
}
@ -1622,7 +1643,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
* answering partner.
*/
if (active && active->lag_ports &&
active->lag_ports->is_enabled &&
__agg_active_ports(active) &&
(__agg_has_partner(active) ||
(!__agg_has_partner(active) &&
!__agg_has_partner(best)))) {
@ -2133,7 +2154,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
else
temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
temp_aggregator->num_of_ports--;
if (temp_aggregator->num_of_ports == 0) {
if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
@ -2432,7 +2453,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
*/
void bond_3ad_handle_link_change(struct slave *slave, char link)
{
struct aggregator *agg;
struct port *port;
bool dummy;
port = &(SLAVE_AD_INFO(slave)->port);
@ -2459,6 +2482,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->is_enabled = false;
ad_update_actor_keys(port, true);
}
agg = __get_first_agg(port);
ad_agg_selection_logic(agg, &dummy);
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
link == BOND_LINK_UP ? "UP" : "DOWN");
@ -2499,7 +2525,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
if (active) {
/* are enough slaves available to consider link up? */
if (active->num_of_ports < bond->params.min_links) {
if (__agg_active_ports(active) < bond->params.min_links) {
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
goto out;


@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
/* upper group completed, look again in lower */
if (priv->rx_next > get_mb_rx_low_last(priv) &&
quota > 0 && mb > get_mb_rx_last(priv)) {
mb > get_mb_rx_last(priv)) {
priv->rx_next = get_mb_rx_first(priv);
goto again;
if (quota > 0)
goto again;
}
return received;
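Untangling the removed and added lines, the fixed at91_poll_rx() logic plausibly reads as below: the wrap back to the lower mailbox group no longer depends on quota, only the retry does, so the RX queue cannot get stuck once quota is exhausted:

    /* upper group completed, look again in lower */
    if (priv->rx_next > get_mb_rx_low_last(priv) &&
        mb > get_mb_rx_last(priv)) {
        priv->rx_next = get_mb_rx_first(priv);
        if (quota > 0)
            goto again;
    }

    return received;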


@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
for (i = 0; i < frame->can_dlc; i += 2) {
priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
frame->data[i] | (frame->data[i + 1] << 8));
if (priv->type == BOSCH_D_CAN) {
u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
data = (u32)frame->data[i];
data |= (u32)frame->data[i + 1] << 8;
data |= (u32)frame->data[i + 2] << 16;
data |= (u32)frame->data[i + 3] << 24;
priv->write_reg32(priv, dreg, data);
}
} else {
for (i = 0; i < frame->can_dlc; i += 2) {
priv->write_reg(priv,
C_CAN_IFACE(DATA1_REG, iface) + i / 2,
frame->data[i] |
(frame->data[i + 1] << 8));
}
}
}
@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
} else {
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
data = priv->read_reg(priv, dreg);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
if (priv->type == BOSCH_D_CAN) {
for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
data = priv->read_reg32(priv, dreg);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
frame->data[i + 2] = data >> 16;
frame->data[i + 3] = data >> 24;
}
} else {
for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
data = priv->read_reg(priv, dreg);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
}
}
}


@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
* - control mode with CAN_CTRLMODE_FD set
*/
if (!data)
return 0;
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
return -EOPNOTSUPP;
}
static void can_dellink(struct net_device *dev, struct list_head *head)
{
return;
}
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
.validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
.dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,


@ -16,7 +16,8 @@ config CAN_ESD_USB2
config CAN_GS_USB
tristate "Geschwister Schneider UG interfaces"
---help---
This driver supports the Geschwister Schneider USB/CAN devices.
This driver supports the Geschwister Schneider and bytewerk.org
candleLight USB CAN interfaces USB/CAN devices
If unsure choose N,
choose Y for built in support,
M to compile as module (module will be named: gs_usb).
@ -46,6 +47,8 @@ config CAN_KVASER_USB
- Kvaser USBcan R
- Kvaser Leaf Light v2
- Kvaser Mini PCI Express HS
- Kvaser Mini PCI Express 2xHS
- Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
- Kvaser USBcan Rugged ("USBcan Rev B")


@ -1,7 +1,9 @@
/* CAN driver for Geschwister Schneider USB/CAN devices.
/* CAN driver for Geschwister Schneider USB/CAN devices
* and bytewerk.org candleLight USB CAN interfaces.
*
* Copyright (C) 2013 Geschwister Schneider Technologie-,
* Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
* Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
* Copyright (C) 2016 Hubert Denkmair
*
* Many thanks to all socketcan devs!
*
@ -29,6 +31,9 @@
#define USB_GSUSB_1_VENDOR_ID 0x1d50
#define USB_GSUSB_1_PRODUCT_ID 0x606f
#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
#define GSUSB_ENDPOINT_IN 1
#define GSUSB_ENDPOINT_OUT 2
@ -952,6 +957,8 @@ static void gs_usb_disconnect(struct usb_interface *intf)
static const struct usb_device_id gs_usb_table[] = {
{ USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
USB_GSUSB_1_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
USB_CANDLELIGHT_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
@ -969,5 +976,6 @@ module_usb_driver(gs_usb_driver);
MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
MODULE_DESCRIPTION(
"Socket CAN device driver for Geschwister Schneider Technologie-, "
"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
"and bytewerk.org candleLight USB CAN interfaces.");
MODULE_LICENSE("GPL v2");


@ -59,11 +59,14 @@
#define USB_CAN_R_PRODUCT_ID 39
#define USB_LEAF_LITE_V2_PRODUCT_ID 288
#define USB_MINI_PCIE_HS_PRODUCT_ID 289
#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
static inline bool kvaser_is_leaf(const struct usb_device_id *id)
{
return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
}
/* Kvaser USBCan-II devices */
@ -537,6 +540,9 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
/* USBCANII family IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),


@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
* on the current MAC's MII bus
*/
for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
if (!aup->phy_search_highest_addr)
/* break out with first one found */
break;
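The au1000 hunk mixes the buggy and fixed lines; the fix is simply to probe the loop variable phy_addr instead of the fixed aup->phy_addr. A sketch of the resulting loop:

    for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
        if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
            phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
            if (!aup->phy_search_highest_addr)
                /* break out with first one found */
                break;
        }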


@ -96,10 +96,6 @@ struct alx_priv {
unsigned int rx_ringsz;
unsigned int rxbuf_size;
struct page *rx_page;
unsigned int rx_page_offset;
unsigned int rx_frag_size;
struct napi_struct napi;
struct alx_tx_queue txq;
struct alx_rx_queue rxq;


@ -70,35 +70,6 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
}
}
static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
{
struct sk_buff *skb;
struct page *page;
if (alx->rx_frag_size > PAGE_SIZE)
return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
page = alx->rx_page;
if (!page) {
alx->rx_page = page = alloc_page(gfp);
if (unlikely(!page))
return NULL;
alx->rx_page_offset = 0;
}
skb = build_skb(page_address(page) + alx->rx_page_offset,
alx->rx_frag_size);
if (likely(skb)) {
alx->rx_page_offset += alx->rx_frag_size;
if (alx->rx_page_offset >= PAGE_SIZE)
alx->rx_page = NULL;
else
get_page(page);
}
return skb;
}
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
struct alx_rx_queue *rxq = &alx->rxq;
@ -115,9 +86,22 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];
skb = alx_alloc_skb(alx, gfp);
/*
* When DMA RX address is set to something like
* 0x....fc0, it will be very likely to cause DMA
* RFD overflow issue.
*
* To work around it, we apply rx skb with 64 bytes
* longer space, and offset the address whenever
* 0x....fc0 is detected.
*/
skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
if (!skb)
break;
if (((unsigned long)skb->data & 0xfff) == 0xfc0)
skb_reserve(skb, 64);
dma = dma_map_single(&alx->hw.pdev->dev,
skb->data, alx->rxbuf_size,
DMA_FROM_DEVICE);
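With the removed alx_alloc_skb() call separated out, the refill loop after this change presumably allocates a plain netdev skb with 64 bytes of slack and offsets the buffer when the problematic DMA address pattern shows up (a best-effort reconstruction of the hunk):

    while (!cur_buf->skb && next != rxq->read_idx) {
        struct alx_rfd *rfd = &rxq->rfd[cur];

        /*
         * A DMA RX address ending in 0x...fc0 is very likely to trigger
         * the RFD overflow issue, so allocate 64 bytes of extra headroom
         * and shift the buffer whenever that pattern is detected.
         */
        skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
        if (!skb)
            break;

        if (((unsigned long)skb->data & 0xfff) == 0xfc0)
            skb_reserve(skb, 64);

        dma = dma_map_single(&alx->hw.pdev->dev,
                             skb->data, alx->rxbuf_size,
                             DMA_FROM_DEVICE);
        /* ... remainder of the loop is unchanged ... */
    }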
@ -153,7 +137,6 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
}
return count;
}
@ -622,11 +605,6 @@ static void alx_free_rings(struct alx_priv *alx)
kfree(alx->txq.bufs);
kfree(alx->rxq.bufs);
if (alx->rx_page) {
put_page(alx->rx_page);
alx->rx_page = NULL;
}
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
@ -681,7 +659,6 @@ static int alx_request_irq(struct alx_priv *alx)
alx->dev->name, alx);
if (!err)
goto out;
/* fall back to legacy interrupt */
pci_disable_msi(alx->hw.pdev);
}
@ -725,7 +702,6 @@ static int alx_init_sw(struct alx_priv *alx)
struct pci_dev *pdev = alx->hw.pdev;
struct alx_hw *hw = &alx->hw;
int err;
unsigned int head_size;
err = alx_identify_hw(alx);
if (err) {
@ -741,12 +717,7 @@ static int alx_init_sw(struct alx_priv *alx)
hw->smb_timer = 400;
hw->mtu = alx->dev->mtu;
alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
alx->rx_frag_size = roundup_pow_of_two(head_size);
alx->tx_ringsz = 256;
alx->rx_ringsz = 512;
hw->imt = 200;
@ -848,7 +819,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
{
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
unsigned int head_size;
if ((max_frame < ALX_MIN_FRAME_SIZE) ||
(max_frame > ALX_MAX_FRAME_SIZE))
@ -860,9 +830,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
netdev->mtu = mtu;
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
alx->rx_frag_size = roundup_pow_of_two(head_size);
netdev_update_features(netdev);
if (netif_running(netdev))
alx_reinit(alx);


@ -267,15 +267,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
u32 ctl1;
u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
if (ctl1 & BGMAC_DESC_CTL0_SOF)
if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
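Separating old from new lines, the point of the bgmac fix is that the start-of-frame flag lives in ctl0, so that word must be read and tested instead of ctl1. A sketch of the resulting loop body:

    u32 ctl0, ctl1;
    int len;

    if (slot_idx == empty_slot)
        break;

    ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
    ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
    len = ctl1 & BGMAC_DESC_CTL1_LEN;
    if (ctl0 & BGMAC_DESC_CTL0_SOF)
        /* Unmap no longer used buffer */
        dma_unmap_single(dma_dev, slot->dma_addr, len,
                         DMA_TO_DEVICE);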
@ -1312,7 +1313,8 @@ static int bgmac_open(struct net_device *net_dev)
phy_start(bgmac->phy_dev);
netif_carrier_on(net_dev);
netif_start_queue(net_dev);
return 0;
}


@ -499,6 +499,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
u32 rr_quantum;
u8 sq_idx = sq->sq_num;
u8 pqs_vnic;
int svf;
if (sq->sqs_mode)
pqs_vnic = nic->pqs_vf[vnic];
@ -511,10 +512,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
if (!sq->sqs_mode) {
tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
} else {
for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
if (nic->vf_sqs[pqs_vnic][svf] == vnic)
break;
}
tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
tl4 += (svf * NIC_TL4_PER_LMAC);
tl4 += (bgx * NIC_TL4_PER_BGX);
}
tl4 += sq_idx;
if (sq->sqs_mode)
tl4 += vnic * 8;
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
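The TL4 hunk removes the unconditional assignment and the later "tl4 += vnic * 8" adjustment in favour of an explicit secondary-Qset branch; reconstructed from the added lines, the new computation looks roughly like this:

    if (!sq->sqs_mode) {
        tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
    } else {
        /* Locate this VF's slot among the primary Qset's secondary VFs */
        for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
            if (nic->vf_sqs[pqs_vnic][svf] == vnic)
                break;
        }
        tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
        tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
        tl4 += (svf * NIC_TL4_PER_LMAC);
        tl4 += (bgx * NIC_TL4_PER_BGX);
    }
    tl4 += sq_idx;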


@ -551,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
}
/* Clear rcvflt bit (latching high) and read it back */
bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
bgx_reg_modify(bgx, lmacid,
BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
@ -570,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
/* Wait for MAC RX to be ready */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
SMU_RX_CTL_STATUS, true)) {
dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
return -1;
}
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@ -589,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault\n");
return -1;
}
/* Receive link is latching low. Force it high and verify it */
bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false)) {
dev_err(&bgx->pdev->dev, "SPU receive link down\n");
return -1;
}
/* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
return 0;
/* Check for MAC RX faults */
cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
cfg &= SMU_RX_CTL_STATUS;
if (!cfg)
return 0;
/* Rx local/remote fault seen.
* Do lmac reinit to see if condition recovers
*/
bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
return -1;
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
u64 link;
u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
@ -621,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
if (link & SPU_STATUS1_RCV_LNK) {
spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
if ((spu_link & SPU_STATUS1_RCV_LNK) &&
!(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
@ -636,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
}
if (lmac->last_link != lmac->link_up) {
if (lmac->link_up) {
if (bgx_xaui_check_link(lmac)) {
/* Errors, clear link_up state */
lmac->link_up = 0;
lmac->last_speed = SPEED_UNKNOWN;
lmac->last_duplex = DUPLEX_UNKNOWN;
}
}
lmac->last_link = lmac->link_up;
if (lmac->link_up)
bgx_xaui_check_link(lmac);
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
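Disentangled, the polling logic behind the thunderx link status fix appears to be: report link up only when the SPU and SMU status agree, and drop back to link down if the subsequent bgx_xaui_check_link() fails (a best-effort reconstruction):

    spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
    smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

    if ((spu_link & SPU_STATUS1_RCV_LNK) &&
        !(smu_link & SMU_RX_CTL_STATUS)) {
        lmac->link_up = 1;
        /* ... speed/duplex setup unchanged ... */
    }

    if (lmac->last_link != lmac->link_up) {
        if (lmac->link_up) {
            if (bgx_xaui_check_link(lmac)) {
                /* Errors, clear link_up state */
                lmac->link_up = 0;
                lmac->last_speed = SPEED_UNKNOWN;
                lmac->last_duplex = DUPLEX_UNKNOWN;
            }
        }
        lmac->last_link = lmac->link_up;
    }

    queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);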
@ -710,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
u64 cmrx_cfg;
u64 cfg;
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
@ -719,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
destroy_workqueue(lmac->check_link);
}
cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
cmrx_cfg &= ~(1 << 15);
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
/* Disable packet reception */
cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
cfg &= ~CMR_PKT_RX_EN;
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
/* Give chance for Rx/Tx FIFO to get drained */
bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
/* Disable packet transmission */
cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
cfg &= ~CMR_PKT_TX_EN;
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
/* Disable serdes lanes */
if (!lmac->is_sgmii)
bgx_reg_modify(bgx, lmacid,
BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
else
bgx_reg_modify(bgx, lmacid,
BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
/* Disable LMAC */
cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
cfg &= ~CMR_EN;
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
bgx_flush_dmac_addrs(bgx, lmacid);
if ((bgx->lmac_type != BGX_MODE_XFI) &&

View File

@ -41,6 +41,7 @@
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
@ -50,6 +51,7 @@
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610


@ -2121,7 +2121,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_error_buff *error_buff;
struct ibmvnic_error_buff *error_buff, *tmp;
unsigned long flags;
bool found = false;
int i;
@ -2133,7 +2133,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
}
spin_lock_irqsave(&adapter->error_list_lock, flags);
list_for_each_entry(error_buff, &adapter->errors, list)
list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
if (error_buff->error_id == crq->request_error_rsp.error_id) {
found = true;
list_del(&error_buff->list);
@ -3141,14 +3141,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_inflight_cmd *inflight_cmd;
struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_error_buff *error_buff;
struct ibmvnic_error_buff *error_buff, *tmp2;
unsigned long flags;
unsigned long flags2;
spin_lock_irqsave(&adapter->inflight_lock, flags);
list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
switch (inflight_cmd->crq.generic.cmd) {
case LOGIN:
dma_unmap_single(dev, adapter->login_buf_token,
@ -3165,8 +3165,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
break;
case REQUEST_ERROR_INFO:
spin_lock_irqsave(&adapter->error_list_lock, flags2);
list_for_each_entry(error_buff, &adapter->errors,
list) {
list_for_each_entry_safe(error_buff, tmp2,
&adapter->errors, list) {
dma_unmap_single(dev, error_buff->dma,
error_buff->len,
DMA_FROM_DEVICE);


@ -154,6 +154,16 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
writel(val, hw->hw_addr + reg);
}
static bool e1000e_vlan_used(struct e1000_adapter *adapter)
{
u16 vid;
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
return true;
return false;
}
/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
@ -2789,7 +2799,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
}
/**
* e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
* e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
@ -3443,7 +3453,8 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
ew32(RCTL, rctl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
e1000e_vlan_used(adapter))
e1000e_vlan_strip_enable(adapter);
else
e1000e_vlan_strip_disable(adapter);
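Put back together, the e1000e change keeps hardware VLAN stripping enabled whenever any VLAN is still configured, even if the NETIF_F_HW_VLAN_CTAG_RX feature bit has been cleared (e.g. after "rxvlan off"):

    if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
        e1000e_vlan_used(adapter))
        e1000e_vlan_strip_enable(adapter);
    else
        e1000e_vlan_strip_disable(adapter);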


@ -481,20 +481,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
dma_addr_t phy_ring_head, phy_ring_tail;
dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dev,
cnt * sizeof(struct mtk_tx_dma),
&phy_ring_head,
&eth->phy_scratch_ring,
GFP_ATOMIC | __GFP_ZERO);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
GFP_KERNEL);
if (unlikely(!eth->scratch_head))
return -ENOMEM;
dma_addr = dma_map_single(eth->dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
@ -502,19 +505,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
return -ENOMEM;
memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
phy_ring_tail = phy_ring_head +
phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
for (i = 0; i < cnt; i++) {
eth->scratch_ring[i].txd1 =
(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
if (i < cnt - 1)
eth->scratch_ring[i].txd2 = (phy_ring_head +
eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
((i + 1) * sizeof(struct mtk_tx_dma)));
eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
}
mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
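This hunk mixes the old phy_ring_head local with the new eth->phy_scratch_ring field; a sketch of the resulting setup follows. Keeping the scratch ring's DMA address in struct mtk_eth is what later lets mtk_dma_free() release it (the dma_mapping_error check is assumed context not shown in the hunk):

    eth->scratch_ring = dma_alloc_coherent(eth->dev,
                                           cnt * sizeof(struct mtk_tx_dma),
                                           &eth->phy_scratch_ring,
                                           GFP_ATOMIC | __GFP_ZERO);
    if (unlikely(!eth->scratch_ring))
        return -ENOMEM;

    eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
    if (unlikely(!eth->scratch_head))
        return -ENOMEM;

    dma_addr = dma_map_single(eth->dev,
                              eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
                              DMA_FROM_DEVICE);
    if (unlikely(dma_mapping_error(eth->dev, dma_addr)))    /* assumed context */
        return -ENOMEM;

    memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
    phy_ring_tail = eth->phy_scratch_ring +
                    (sizeof(struct mtk_tx_dma) * (cnt - 1));

    for (i = 0; i < cnt; i++) {
        eth->scratch_ring[i].txd1 =
            (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
        if (i < cnt - 1)
            eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
                ((i + 1) * sizeof(struct mtk_tx_dma)));
        eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
    }

    mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
    mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
    mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
    mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);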
@ -671,7 +674,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
tx_buf = mtk_desc_to_tx_buf(ring, txd);
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
mtk_tx_unmap(&dev->dev, tx_buf);
@ -701,6 +704,20 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
return nfrags;
}
static int mtk_queue_stopped(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
if (netif_queue_stopped(eth->netdev[i]))
return 1;
}
return 0;
}
static void mtk_wake_queue(struct mtk_eth *eth)
{
int i;
@ -766,12 +783,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
goto drop;
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
mtk_stop_queue(eth);
if (unlikely(atomic_read(&ring->free_count) >
ring->thresh))
mtk_wake_queue(eth);
}
spin_unlock_irqrestore(&eth->page_lock, flags);
return NETDEV_TX_OK;
@ -826,6 +840,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
@ -833,6 +848,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
put_page(virt_to_head_page(new_data));
netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@ -921,7 +937,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
}
mtk_tx_unmap(eth->dev, tx_buf);
ring->last_free->txd2 = next_cpu;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@ -946,7 +961,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
if (!total)
return 0;
if (atomic_read(&ring->free_count) > ring->thresh)
if (mtk_queue_stopped(eth) &&
(atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
return total;
@ -1027,9 +1043,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
MAX_SKB_FRAGS);
ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
* continue
@ -1207,6 +1222,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
dma_free_coherent(eth->dev,
MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
eth->scratch_ring,
eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
mtk_tx_clean(eth);
mtk_rx_clean(eth);
kfree(eth->scratch_head);
@ -1269,7 +1292,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
mtk_w32(eth,
MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
MTK_RX_BT_32DWORDS,
MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
MTK_QDMA_GLO_CFG);
return 0;
@ -1383,7 +1406,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
mtk_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
@ -1697,7 +1720,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
eth->netdev[id]->watchdog_timeo = HZ;
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &


@ -91,6 +91,7 @@
#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
#define MTK_NDP_CO_PRO BIT(10)
#define MTK_TX_WB_DDONE BIT(6)
#define MTK_DMA_SIZE_16DWORDS (2 << 4)
#define MTK_RX_DMA_BUSY BIT(3)
@ -357,6 +358,7 @@ struct mtk_rx_ring {
* @rx_ring: Pointer to the memore holding info about the RX ring
* @rx_napi: The NAPI struct
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
* @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
* @clk_ethif: The ethif clock
* @clk_esw: The switch clock
@ -384,6 +386,7 @@ struct mtk_eth {
struct mtk_rx_ring rx_ring;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clk_ethif;
struct clk *clk_esw;


@ -2597,7 +2597,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
priv->cmd.free_head = 0;
sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
spin_lock_init(&priv->cmd.context_lock);
for (priv->cmd.token_mask = 1;
priv->cmd.token_mask < priv->cmd.max_cmds;


@ -406,14 +406,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
if (err) {
en_err(priv, "Failed configuring VLAN filter\n");
goto out;
}
}
if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
en_dbg(HW, priv, "failed adding vlan %d\n", vid);
mutex_unlock(&mdev->state_lock);
err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
if (err)
en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
return 0;
out:
mutex_unlock(&mdev->state_lock);
return err;
}
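Separating old from new lines, the reworked mlx4_en_vlan_rx_add_vid() presumably propagates errors from both the VLAN filter configuration and mlx4_register_vlan() instead of always returning 0:

    mutex_lock(&mdev->state_lock);
    if (mdev->device_up && priv->port_up) {
        err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
        if (err) {
            en_err(priv, "Failed configuring VLAN filter\n");
            goto out;
        }
    }
    err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
    if (err)
        en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
    mutex_unlock(&mdev->state_lock);
    return err;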
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@ -421,7 +425,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@ -438,7 +442,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
}
mutex_unlock(&mdev->state_lock);
return 0;
return err;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
@ -2032,11 +2036,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
return -ENOMEM;
}
static void mlx4_en_shutdown(struct net_device *dev)
{
rtnl_lock();
netif_device_detach(dev);
mlx4_en_close(dev);
rtnl_unlock();
}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
bool shutdown = mdev->dev->persist->interface_state &
MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@ -2044,7 +2057,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
if (priv->registered) {
devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
priv->port));
unregister_netdev(dev);
if (shutdown)
mlx4_en_shutdown(dev);
else
unregister_netdev(dev);
}
if (priv->allocated)
@ -2069,7 +2085,8 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
kfree(priv->tx_ring);
kfree(priv->tx_cq);
free_netdev(dev);
if (!shutdown)
free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@ -2447,9 +2464,14 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
* strip that feature if this is an IPv6 encapsulated frame.
*/
if (skb->encapsulation &&
(skb->ip_summed == CHECKSUM_PARTIAL) &&
(ip_hdr(skb)->version != 4))
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
(skb->ip_summed == CHECKSUM_PARTIAL)) {
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->vxlan_port ||
(ip_hdr(skb)->version != 4) ||
(udp_hdr(skb)->dest != priv->vxlan_port))
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
return features;
}
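Reconstructed, the features_check change only drops the checksum/GSO features for encapsulated packets the hardware cannot offload: no VXLAN port configured, an outer header that is not IPv4, or a UDP destination port other than the offloaded VXLAN port:

    if (skb->encapsulation &&
        (skb->ip_summed == CHECKSUM_PARTIAL)) {
        struct mlx4_en_priv *priv = netdev_priv(dev);

        if (!priv->vxlan_port ||
            (ip_hdr(skb)->version != 4) ||
            (udp_hdr(skb)->dest != priv->vxlan_port))
            features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
    }

    return features;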


@ -3222,6 +3222,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list);
mutex_init(&priv->bf_mutex);
@ -4134,8 +4135,11 @@ static void mlx4_shutdown(struct pci_dev *pdev)
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
/* Notify mlx4 clients that the kernel is being shut down */
persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
mlx4_unload_one(pdev);
}
mutex_unlock(&persist->interface_state_mutex);
}


@ -545,6 +545,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
default: return "unknown command opcode";
}
}


@ -401,7 +401,7 @@ enum mlx5e_traffic_types {
};
enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLE,
MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
};


@ -184,7 +184,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
#define MLX5E_NUM_SQ_STATS(priv) \
(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
test_bit(MLX5E_STATE_OPENED, &priv->state))
#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
#define MLX5E_NUM_PFC_COUNTERS(priv) \
(hweight8(mlx5e_query_pfc_combined(priv)) * \
NUM_PPORT_PER_PRIO_PFC_COUNTERS)
static int mlx5e_get_sset_count(struct net_device *dev, int sset)
{
@ -211,42 +213,41 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
/* SW counters */
for (i = 0; i < NUM_SW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
/* Q counters */
for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
/* VPORT counters */
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vport_stats_desc[i].name);
vport_stats_desc[i].format);
/* PPORT counters */
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_802_3_stats_desc[i].name);
pport_802_3_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_2863_stats_desc[i].name);
pport_2863_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_2819_stats_desc[i].name);
pport_2819_stats_desc[i].format);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
prio,
pport_per_prio_traffic_stats_desc[i].name);
sprintf(data + (idx++) * ETH_GSTRING_LEN,
pport_per_prio_traffic_stats_desc[i].format, prio);
}
pfc_combined = mlx5e_query_pfc_combined(priv);
for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
prio, pport_per_prio_pfc_stats_desc[i].name);
sprintf(data + (idx++) * ETH_GSTRING_LEN,
pport_per_prio_pfc_stats_desc[i].format, prio);
}
}
@ -256,16 +257,15 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
rq_stats_desc[j].name);
sprintf(data + (idx++) * ETH_GSTRING_LEN,
rq_stats_desc[j].format, i);
for (tc = 0; tc < priv->params.num_tc; tc++)
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
"tx%d_%s",
priv->channeltc_to_txq_map[i][tc],
sq_stats_desc[j].name);
sq_stats_desc[j].format,
priv->channeltc_to_txq_map[i][tc]);
}
static void mlx5e_get_strings(struct net_device *dev,


@ -105,11 +105,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
s->lro_packets += rq_stats->lro_packets;
s->lro_bytes += rq_stats->lro_bytes;
s->rx_lro_packets += rq_stats->lro_packets;
s->rx_lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_sw += rq_stats->csum_sw;
s->rx_csum_inner += rq_stats->csum_inner;
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
@ -122,24 +122,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
s->tso_packets += sq_stats->tso_packets;
s->tso_bytes += sq_stats->tso_bytes;
s->tso_inner_packets += sq_stats->tso_inner_packets;
s->tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_tso_packets += sq_stats->tso_packets;
s->tx_tso_bytes += sq_stats->tso_bytes;
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_csum_inner += sq_stats->csum_offload_inner;
tx_offload_none += sq_stats->csum_offload_none;
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
tx_offload_none += sq_stats->csum_none;
}
}
/* Update calculated offload counters */
s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
s->rx_csum_good = s->rx_packets - s->rx_csum_none -
s->rx_csum_sw;
s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
s->link_down_events = MLX5_GET(ppcnt_reg,
s->link_down_events_phy = MLX5_GET(ppcnt_reg,
priv->stats.pport.phy_counters,
counter_set.phys_layer_cntrs.link_down_events);
}
@ -244,7 +243,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
{
struct mlx5e_priv *priv = vpriv;
if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
switch (event) {
@ -260,12 +259,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
@ -580,7 +579,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int err;
err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;


@ -689,7 +689,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (is_first_ethertype_ip(skb)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
rq->stats.csum_sw++;
rq->stats.csum_complete++;
return;
}
@ -699,7 +699,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
skb->encapsulation = 1;
rq->stats.csum_inner++;
rq->stats.csum_unnecessary_inner++;
}
return;
}


@ -42,9 +42,11 @@
be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
struct counter_desc {
char name[ETH_GSTRING_LEN];
char format[ETH_GSTRING_LEN];
int offset; /* Byte offset */
};
@ -53,18 +55,18 @@ struct mlx5e_sw_stats {
u64 rx_bytes;
u64 tx_packets;
u64 tx_bytes;
u64 tso_packets;
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
u64 lro_packets;
u64 lro_bytes;
u64 rx_csum_good;
u64 tx_tso_packets;
u64 tx_tso_bytes;
u64 tx_tso_inner_packets;
u64 tx_tso_inner_bytes;
u64 rx_lro_packets;
u64 rx_lro_bytes;
u64 rx_csum_unnecessary;
u64 rx_csum_none;
u64 rx_csum_sw;
u64 rx_csum_inner;
u64 tx_csum_offload;
u64 tx_csum_inner;
u64 rx_csum_complete;
u64 rx_csum_unnecessary_inner;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
@ -76,7 +78,7 @@ struct mlx5e_sw_stats {
u64 rx_cqe_compress_pkts;
/* Special handling counters */
u64 link_down_events;
u64 link_down_events_phy;
};
static const struct counter_desc sw_stats_desc[] = {
@ -84,18 +86,18 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
@ -105,7 +107,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
struct mlx5e_qcounter_stats {
@ -125,12 +127,6 @@ struct mlx5e_vport_stats {
};
static const struct counter_desc vport_stats_desc[] = {
{ "rx_vport_error_packets",
VPORT_COUNTER_OFF(received_errors.packets) },
{ "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
{ "tx_vport_error_packets",
VPORT_COUNTER_OFF(transmit_errors.packets) },
{ "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
{ "rx_vport_unicast_packets",
VPORT_COUNTER_OFF(received_eth_unicast.packets) },
{ "rx_vport_unicast_bytes",
@ -192,94 +188,68 @@ struct mlx5e_pport_stats {
};
static const struct counter_desc pport_802_3_stats_desc[] = {
{ "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) },
{ "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) },
{ "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
{ "alignment_err", PPORT_802_3_OFF(a_alignment_errors) },
{ "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) },
{ "octets_received", PPORT_802_3_OFF(a_octets_received_ok) },
{ "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
{ "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
{ "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
{ "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
{ "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) },
{ "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) },
{ "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) },
{ "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
{ "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
{ "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) },
{ "unsupported_op_rx",
PPORT_802_3_OFF(a_unsupported_opcodes_received) },
{ "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
{ "pause_ctrl_tx",
PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};
static const struct counter_desc pport_2863_stats_desc[] = {
{ "in_octets", PPORT_2863_OFF(if_in_octets) },
{ "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) },
{ "in_discards", PPORT_2863_OFF(if_in_discards) },
{ "in_errors", PPORT_2863_OFF(if_in_errors) },
{ "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
{ "out_octets", PPORT_2863_OFF(if_out_octets) },
{ "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
{ "out_discards", PPORT_2863_OFF(if_out_discards) },
{ "out_errors", PPORT_2863_OFF(if_out_errors) },
{ "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
{ "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
{ "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
{ "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};
static const struct counter_desc pport_2819_stats_desc[] = {
{ "drop_events", PPORT_2819_OFF(ether_stats_drop_events) },
{ "octets", PPORT_2819_OFF(ether_stats_octets) },
{ "pkts", PPORT_2819_OFF(ether_stats_pkts) },
{ "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) },
{ "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) },
{ "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) },
{ "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) },
{ "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) },
{ "fragments", PPORT_2819_OFF(ether_stats_fragments) },
{ "jabbers", PPORT_2819_OFF(ether_stats_jabbers) },
{ "collisions", PPORT_2819_OFF(ether_stats_collisions) },
{ "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) },
{ "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
{ "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
{ "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
{ "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
{ "p1024to1518octets",
PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
{ "p1519to2047octets",
PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
{ "p2048to4095octets",
PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
{ "p4096to8191octets",
PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
{ "p8192to10239octets",
PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
{ "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) },
{ "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) },
{ "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) },
{ "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) },
{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) },
{ "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
{ "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) },
{ "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
{ "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
{ "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
{ "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
{ "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
{ "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
{ "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
u64 csum_sw;
u64 csum_inner;
u64 csum_complete;
u64 csum_unnecessary_inner;
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
@ -292,19 +262,19 @@ struct mlx5e_rq_stats {
};
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
};
struct mlx5e_sq_stats {
@ -315,28 +285,28 @@ struct mlx5e_sq_stats {
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
u64 csum_offload_inner;
u64 csum_partial_inner;
u64 nop;
/* less likely accessed in data path */
u64 csum_offload_none;
u64 csum_none;
u64 stopped;
u64 wake;
u64 dropped;
};
static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)


@ -192,12 +192,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
if (skb->encapsulation) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
MLX5_ETH_WQE_L4_INNER_CSUM;
sq->stats.csum_offload_inner++;
sq->stats.csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
} else
sq->stats.csum_offload_none++;
sq->stats.csum_none++;
if (sq->cc != sq->prev_cc) {
sq->prev_cc = sq->cc;


@ -1508,8 +1508,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
{ PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
{ PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
{ PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
{ 0, }
};


@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
struct mlx5e_vxlan *vxlan;
int err;
if (mlx5e_vxlan_lookup_port(priv, port))
goto free_work;
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;


@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}


@ -408,7 +408,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
}
mlxsw_sp_txhdr_construct(skb, &tx_info);
len = skb->len;
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
*/
len = skb->len - MLXSW_TXHDR_LEN;
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/


@ -316,7 +316,10 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
}
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
len = skb->len;
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
*/
len = skb->len - MLXSW_TXHDR_LEN;
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/


@ -2015,7 +2015,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)
netif_tx_wake_all_queues(nn->netdev);
enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nfp_net_read_link_status(nn);
}
@ -2044,7 +2044,7 @@ static int nfp_net_netdev_open(struct net_device *netdev)
NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
if (err)
goto err_free_exn;
disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
@ -2133,7 +2133,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
{
unsigned int r;
disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
netif_carrier_off(nn->netdev);
nn->link_up = false;


@ -3700,6 +3700,7 @@ struct public_port {
#define MEDIA_DA_TWINAX 0x3
#define MEDIA_BASE_T 0x4
#define MEDIA_SFP_1G_FIBER 0x5
#define MEDIA_MODULE_FIBER 0x6
#define MEDIA_KR 0xf0
#define MEDIA_NOT_PRESENT 0xff


@ -72,6 +72,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->mtu = cpu_to_le16(p_params->mtu);
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
p_ramrod->untagged = p_params->only_untagged;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@ -247,10 +248,6 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
!!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
@ -1748,7 +1745,8 @@ static int qed_start_vport(struct qed_dev *cdev,
start.vport_id, start.mtu);
}
qed_reset_vport_stats(cdev);
if (params->clear_stats)
qed_reset_vport_stats(cdev);
return 0;
}


@ -1085,6 +1085,7 @@ static int qed_get_port_type(u32 media_type)
case MEDIA_SFPP_10G_FIBER:
case MEDIA_SFP_1G_FIBER:
case MEDIA_XFP_FIBER:
case MEDIA_MODULE_FIBER:
case MEDIA_KR:
port_type = PORT_FIBRE;
break;


@ -213,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
/* validate producer is up to-date */
rmb();
db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
/* do not reorder */
barrier();
/* make sure the SPQE is updated before the doorbell */
wmb();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rang */
mmiowb();
wmb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
@ -614,7 +610,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
*p_en2 = *p_ent;
kfree(p_ent);
/* EBLOCK responsible to free the allocated p_ent */
if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
kfree(p_ent);
p_ent = p_en2;
}
@ -749,6 +747,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
* Thus, after gaining the answer perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
if (p_ent->queue == &p_spq->unlimited_pending) {
/* This is an allocated p_ent which does not need to
* return to pool.
*/
kfree(p_ent);
return rc;
}
if (rc)
goto spq_post_fail2;
@ -844,8 +851,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
/* EBLOCK is responsible for freeing its own entry */
if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
/* EBLOCK is responsible for returning its own entry into the
* free list, unless it originally added the entry into the
* unlimited pending list.
*/
qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
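For readers skimming the doorbell hunk above: the change drops the redundant rmb()/barrier() pair and keeps a wmb() before the doorbell write, plus a wmb() in place of mmiowb() after it, so the queue element is globally visible before the hardware is told about it. A minimal sketch of that ordering, with placeholder names ('ring', 'prod', 'db_reg'), looks roughly like this; it is illustrative only, not the driver's actual code:

	/* Publish the new element, then ring the doorbell. The first
	 * wmb() makes the element visible to the device before the
	 * doorbell write; the second keeps the doorbell write ordered
	 * ahead of any later stores from this CPU.
	 */
	ring[prod & ring_mask] = elem;
	wmb();				/* element before doorbell */
	writel(prod, db_reg);		/* doorbell write into the BAR */
	wmb();				/* doorbell before anything later */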


@ -3231,7 +3231,7 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc;
}
static int qede_start_queues(struct qede_dev *edev)
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
int rc, tc, i;
int vlan_removal_en = 1;
@ -3462,6 +3462,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
enum qede_load_mode {
QEDE_LOAD_NORMAL,
QEDE_LOAD_RELOAD,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@ -3500,7 +3501,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
goto err3;
DP_INFO(edev, "Setup IRQs succeeded\n");
rc = qede_start_queues(edev);
rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
if (rc)
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@ -3555,7 +3556,7 @@ void qede_reload(struct qede_dev *edev,
if (func)
func(edev, args);
qede_load(edev, QEDE_LOAD_NORMAL);
qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock);
qede_config_rx_mode(edev->ndev);


@ -2220,7 +2220,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
if (!opcode)
return;
ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
desc = &sds_ring->desc_head[consumer];
desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);


@ -104,7 +104,8 @@ int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs)
{
unsigned address = 0, i, j;
unsigned address = 0;
int i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {


@ -116,7 +116,6 @@ struct smsc911x_data {
struct phy_device *phy_dev;
struct mii_bus *mii_bus;
int phy_irq[PHY_MAX_ADDR];
unsigned int using_extphy;
int last_duplex;
int last_carrier;
@ -1073,7 +1072,6 @@ static int smsc911x_mii_init(struct platform_device *pdev,
pdata->mii_bus->priv = pdata;
pdata->mii_bus->read = smsc911x_mii_read;
pdata->mii_bus->write = smsc911x_mii_write;
memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
pdata->mii_bus->parent = &pdev->dev;


@ -2505,8 +2505,6 @@ static int cpsw_probe(struct platform_device *pdev)
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
@ -2534,8 +2532,6 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(ndev);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);


@ -462,7 +462,7 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
struct skb_shared_hwtstamps shhwtstamps;
struct timespec ts;
struct timespec64 ts;
shtx->tx_flags |= SKBTX_IN_PROGRESS;
gxio_mpipe_get_timestamp(&md->context, &ts);
@ -886,9 +886,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
struct timespec ts;
struct timespec64 ts;
getnstimeofday(&ts);
ktime_get_ts64(&ts);
gxio_mpipe_set_timestamp(&md->context, &ts);
mutex_init(&md->ptp_lock);


@ -17,4 +17,4 @@ skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
# projects. To keep the source common for all those drivers (and
# thus simplify fixes to it), please do not clean it up!
ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes


@ -958,8 +958,8 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
else
dev->stats.tx_errors++;
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@ -1048,8 +1048,8 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
else
dev->stats.tx_errors++;
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
#endif
@ -1508,6 +1508,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
LIST_HEAD(list_kill);
int err;
memset(tb, 0, sizeof(tb));
@ -1519,8 +1520,10 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
err = geneve_configure(net, dev, &geneve_remote_unspec,
0, 0, 0, 0, htons(dst_port), true,
GENEVE_F_UDP_ZERO_CSUM6_RX);
if (err)
goto err;
if (err) {
free_netdev(dev);
return ERR_PTR(err);
}
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
@ -1529,10 +1532,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
if (err)
goto err;
err = rtnl_configure_link(dev, NULL);
if (err < 0)
goto err;
return dev;
err:
free_netdev(dev);
geneve_dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);


@ -605,12 +605,41 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
dev_put(dev);
}
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
unsigned char **iv,
struct scatterlist **sg)
{
size_t size, iv_offset, sg_offset;
struct aead_request *req;
void *tmp;
size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
iv_offset = size;
size += GCM_AES_IV_LEN;
size = ALIGN(size, __alignof__(struct scatterlist));
sg_offset = size;
size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
tmp = kmalloc(size, GFP_ATOMIC);
if (!tmp)
return NULL;
*iv = (unsigned char *)(tmp + iv_offset);
*sg = (struct scatterlist *)(tmp + sg_offset);
req = tmp;
aead_request_set_tfm(req, tfm);
return req;
}
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct net_device *dev)
{
int ret;
struct scatterlist sg[MAX_SKB_FRAGS + 1];
unsigned char iv[GCM_AES_IV_LEN];
struct scatterlist *sg;
unsigned char *iv;
struct ethhdr *eth;
struct macsec_eth_header *hh;
size_t unprotected_len;
@ -668,8 +697,6 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_fill_sectag(hh, secy, pn);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
macsec_fill_iv(iv, secy->sci, pn);
skb_put(skb, secy->icv_len);
if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
@ -684,13 +711,15 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
if (!req) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
macsec_fill_iv(iv, secy->sci, pn);
sg_init_table(sg, MAX_SKB_FRAGS + 1);
skb_to_sgvec(skb, sg, 0, skb->len);
@ -861,7 +890,6 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
out:
macsec_rxsa_put(rx_sa);
dev_put(dev);
return;
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
@ -871,8 +899,8 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
struct macsec_secy *secy)
{
int ret;
struct scatterlist sg[MAX_SKB_FRAGS + 1];
unsigned char iv[GCM_AES_IV_LEN];
struct scatterlist *sg;
unsigned char *iv;
struct aead_request *req;
struct macsec_eth_header *hdr;
u16 icv_len = secy->icv_len;
@ -882,7 +910,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
if (!skb)
return ERR_PTR(-ENOMEM);
req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
if (!req) {
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
@ -1234,7 +1262,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (!tfm || IS_ERR(tfm))
return NULL;
@ -3361,6 +3389,7 @@ static void __exit macsec_exit(void)
genl_unregister_family(&macsec_fam);
rtnl_link_unregister(&macsec_link_ops);
unregister_netdevice_notifier(&macsec_notifier);
rcu_barrier();
}
module_init(macsec_init);


@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio.h>
#include <linux/idr.h>
#define MII_REGS_NUM 29
@ -286,6 +287,8 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
static DEFINE_IDA(phy_fixed_ida);
static void fixed_phy_del(int phy_addr)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
@ -297,14 +300,12 @@ static void fixed_phy_del(int phy_addr)
if (gpio_is_valid(fp->link_gpio))
gpio_free(fp->link_gpio);
kfree(fp);
ida_simple_remove(&phy_fixed_ida, phy_addr);
return;
}
}
}
static int phy_fixed_addr;
static DEFINE_SPINLOCK(phy_fixed_addr_lock);
struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
int link_gpio,
@ -319,17 +320,15 @@ struct phy_device *fixed_phy_register(unsigned int irq,
return ERR_PTR(-EPROBE_DEFER);
/* Get the next available PHY address, up to PHY_MAX_ADDR */
spin_lock(&phy_fixed_addr_lock);
if (phy_fixed_addr == PHY_MAX_ADDR) {
spin_unlock(&phy_fixed_addr_lock);
return ERR_PTR(-ENOSPC);
}
phy_addr = phy_fixed_addr++;
spin_unlock(&phy_fixed_addr_lock);
phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
if (ret < 0)
if (ret < 0) {
ida_simple_remove(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
}
phy = get_phy_device(fmb->mii_bus, phy_addr, false);
if (IS_ERR(phy)) {
@ -434,6 +433,7 @@ static void __exit fixed_mdio_bus_exit(void)
list_del(&fp->node);
kfree(fp);
}
ida_destroy(&phy_fixed_ida);
}
module_exit(fixed_mdio_bus_exit);
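The fixed-PHY change above swaps a hand-rolled address counter plus spinlock for an IDA, so freed PHY addresses can be handed out again. A bare-bones sketch of that allocation pattern is shown below; EXAMPLE_MAX_ID and the example_* names are placeholders, not driver code:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_get_id(void)
	{
		/* lowest free id in [0, EXAMPLE_MAX_ID), or -ENOSPC/-ENOMEM */
		return ida_simple_get(&example_ida, 0, EXAMPLE_MAX_ID, GFP_KERNEL);
	}

	static void example_put_id(int id)
	{
		/* returning the id makes it available to the next caller */
		ida_simple_remove(&example_ida, id);
	}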


@ -285,6 +285,48 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
static int m88e1111_config_aneg(struct phy_device *phydev)
{
int err;
/* The Marvell PHY has an errata which requires
* that certain registers get written in order
* to restart autonegotiation
*/
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
err = marvell_set_polarity(phydev, phydev->mdix);
if (err < 0)
return err;
err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
MII_M1111_PHY_LED_DIRECT);
if (err < 0)
return err;
err = genphy_config_aneg(phydev);
if (err < 0)
return err;
if (phydev->autoneg != AUTONEG_ENABLE) {
int bmcr;
/* A write to speed/duplex bits (that is performed by
* genphy_config_aneg() call above) must be followed by
* a software reset. Otherwise, the write has no effect.
*/
bmcr = phy_read(phydev, MII_BMCR);
if (bmcr < 0)
return bmcr;
err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
if (err < 0)
return err;
}
return 0;
}
#ifdef CONFIG_OF_MDIO
/*
* Set and/or override some configuration registers based on the
@ -407,15 +449,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
err = genphy_config_aneg(phydev);
return err;
return genphy_config_aneg(phydev);
}
static int m88e1318_config_aneg(struct phy_device *phydev)
@ -636,6 +670,28 @@ static int m88e1111_config_init(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_RESET);
}
static int m88e1121_config_init(struct phy_device *phydev)
{
int err, oldpage;
oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
if (err < 0)
return err;
/* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
MII_88E1121_PHY_LED_DEF);
if (err < 0)
return err;
phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
/* Set marvell,reg-init configuration from device tree */
return marvell_config_init(phydev);
}
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
@ -668,7 +724,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
return err;
}
return marvell_config_init(phydev);
return m88e1121_config_init(phydev);
}
static int m88e1118_config_aneg(struct phy_device *phydev)
@ -1161,7 +1217,7 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
.config_aneg = &m88e1111_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
@ -1196,7 +1252,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &marvell_config_init,
.config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@ -1215,7 +1271,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &marvell_config_init,
.config_init = &m88e1121_config_init,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,


@ -75,22 +75,13 @@ static int smsc_phy_reset(struct phy_device *phydev)
* in all capable mode before using it.
*/
if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
int timeout = 50000;
/* set "all capable" mode and reset the phy */
/* set "all capable" mode */
rc |= MII_LAN83C185_MODE_ALL;
phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
phy_write(phydev, MII_BMCR, BMCR_RESET);
/* wait end of reset (max 500 ms) */
do {
udelay(10);
if (timeout-- == 0)
return -1;
rc = phy_read(phydev, MII_BMCR);
} while (rc & BMCR_RESET);
}
return 0;
/* reset the phy */
return genphy_soft_reset(phydev);
}
static int lan911x_config_init(struct phy_device *phydev)


@ -1203,8 +1203,10 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_dev_open;
}
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
netif_addr_unlock_bh(dev);
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {


@ -31,7 +31,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
#define NET_VERSION "3"
#define NET_VERSION "4"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@ -116,6 +116,7 @@
#define USB_TX_DMA 0xd434
#define USB_TOLERANCE 0xd490
#define USB_LPM_CTRL 0xd41a
#define USB_BMU_RESET 0xd4b0
#define USB_UPS_CTRL 0xd800
#define USB_MISC_0 0xd81a
#define USB_POWER_CUT 0xd80a
@ -338,6 +339,10 @@
#define TEST_MODE_DISABLE 0x00000001
#define TX_SIZE_ADJUST1 0x00000100
/* USB_BMU_RESET */
#define BMU_RESET_EP_IN 0x01
#define BMU_RESET_EP_OUT 0x02
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
@ -2169,7 +2174,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
static void r8153_set_rx_early_size(struct r8152 *tp)
{
u32 mtu = tp->netdev->mtu;
u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
}
@ -2456,6 +2461,17 @@ static void r8153_teredo_off(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
}
static void rtl_reset_bmu(struct r8152 *tp)
{
u32 ocp_data;
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
}
static void r8152_aldps_en(struct r8152 *tp, bool enable)
{
if (enable) {
@ -2681,6 +2697,7 @@ static void r8153_first_init(struct r8152 *tp)
r8153_hw_phy_cfg(tp);
rtl8152_nic_reset(tp);
rtl_reset_bmu(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@ -2742,6 +2759,7 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
rtl_disable(tp);
rtl_reset_bmu(tp);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@ -2803,6 +2821,7 @@ static void rtl8153_disable(struct r8152 *tp)
{
r8153_aldps_en(tp, false);
rtl_disable(tp);
rtl_reset_bmu(tp);
r8153_aldps_en(tp, true);
usb_enable_lpm(tp->udev);
}
@ -3382,15 +3401,11 @@ static void r8153_init(struct r8152 *tp)
r8153_power_cut_en(tp, false);
r8153_u1u2en(tp, true);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
U1U2_SPDWN_EN | L1_SPDWN_EN);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
EEE_SPDWN_EN);
/* MAC clock speed down */
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
r8153_enable_eee(tp);
r8153_aldps_en(tp, true);


@ -304,7 +304,7 @@ static int vrf_rt6_create(struct net_device *dev)
dst_hold(&rt6->dst);
rt6->rt6i_table = rt6i_table;
rt6->dst.output = vrf_output6;
rt6->dst.output = vrf_output6;
rcu_assign_pointer(vrf->rt6, rt6);
rc = 0;
@ -403,7 +403,7 @@ static int vrf_rtable_create(struct net_device *dev)
if (!rth)
return -ENOMEM;
rth->dst.output = vrf_output;
rth->dst.output = vrf_output;
rth->rt_table_id = vrf->tb_id;
rcu_assign_pointer(vrf->rth, rth);


@ -2952,30 +2952,6 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return 0;
}
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf)
{
struct nlattr *tb[IFLA_MAX+1];
struct net_device *dev;
int err;
memset(&tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
&vxlan_link_ops, tb);
if (IS_ERR(dev))
return dev;
err = vxlan_dev_configure(net, dev, conf);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
}
return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
@ -3268,6 +3244,40 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.get_link_net = vxlan_get_link_net,
};
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type,
struct vxlan_config *conf)
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
int err;
memset(&tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
&vxlan_link_ops, tb);
if (IS_ERR(dev))
return dev;
err = vxlan_dev_configure(net, dev, conf);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
}
err = rtnl_configure_link(dev, NULL);
if (err < 0) {
LIST_HEAD(list_kill);
vxlan_dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
struct net_device *dev)
{


@ -1083,7 +1083,7 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
}
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
ar->running_fw->fw_file.fw_features,
fw_file->fw_features,
sizeof(fw_file->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:


@ -1904,7 +1904,6 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
return;
}
}
ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,


@ -679,10 +679,10 @@ static int ath10k_peer_create(struct ath10k *ar,
peer = ath10k_peer_find(ar, vdev_id, addr);
if (!peer) {
spin_unlock_bh(&ar->data_lock);
ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
addr, vdev_id);
ath10k_wmi_peer_delete(ar, vdev_id, addr);
spin_unlock_bh(&ar->data_lock);
return -ENOENT;
}


@ -1122,12 +1122,12 @@ enum {
#define AR9300_NUM_GPIO 16
#define AR9330_NUM_GPIO 16
#define AR9340_NUM_GPIO 23
#define AR9462_NUM_GPIO 10
#define AR9462_NUM_GPIO 14
#define AR9485_NUM_GPIO 12
#define AR9531_NUM_GPIO 18
#define AR9550_NUM_GPIO 24
#define AR9561_NUM_GPIO 23
#define AR9565_NUM_GPIO 12
#define AR9565_NUM_GPIO 14
#define AR9580_NUM_GPIO 16
#define AR7010_NUM_GPIO 16
@ -1139,12 +1139,12 @@ enum {
#define AR9300_GPIO_MASK 0x0000F4FF
#define AR9330_GPIO_MASK 0x0000F4FF
#define AR9340_GPIO_MASK 0x0000000F
#define AR9462_GPIO_MASK 0x000003FF
#define AR9462_GPIO_MASK 0x00003FFF
#define AR9485_GPIO_MASK 0x00000FFF
#define AR9531_GPIO_MASK 0x0000000F
#define AR9550_GPIO_MASK 0x0000000F
#define AR9561_GPIO_MASK 0x0000000F
#define AR9565_GPIO_MASK 0x00000FFF
#define AR9565_GPIO_MASK 0x00003FFF
#define AR9580_GPIO_MASK 0x0000F4FF
#define AR7010_GPIO_MASK 0x0000FFFF


@ -3851,8 +3851,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
if (idx != 0)
return -ENOENT;
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
mutex_lock(&mvm->mutex);
@ -3898,8 +3898,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */


@ -581,7 +581,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_sta *mvm_sta;
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
struct sk_buff *tail;
@ -604,6 +604,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
if (WARN_ON(IS_ERR_OR_NULL(sta)))
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
/* not a data packet */
if (!ieee80211_is_data_qos(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))


@ -1222,7 +1222,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EIO;
}
#define SCAN_TIMEOUT (16 * HZ)
#define SCAN_TIMEOUT (20 * HZ)
void iwl_mvm_scan_timeout(unsigned long data)
{


@ -1852,12 +1852,18 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
u8 sta_id = mvmvif->ap_sta_id;
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/*
* It is possible that the 'sta' parameter is NULL,
* for example when a GTK is removed - the sta_id will then
* be the AP ID, and no station was passed by mac80211.
*/
return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (IS_ERR_OR_NULL(sta))
return NULL;
return iwl_mvm_sta_from_mac80211(sta);
}
return NULL;
@ -1955,6 +1961,14 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
struct ieee80211_key_seq seq;
const u8 *pn;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_AES_CMAC:
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
break;
default:
return -EINVAL;
}
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;


@ -1149,7 +1149,7 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
for (i = 0; i < retry; i++) {
path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
if (path_a_ok == 0x03) {
if (path_b_ok == 0x03) {
val32 = rtl8xxxu_read32(priv,
REG_RX_POWER_BEFORE_IQK_B_2);
result[t][6] = (val32 >> 16) & 0x3ff;


@ -111,6 +111,31 @@ enum bpf_access_type {
BPF_WRITE = 2
};
/* types of values stored in eBPF registers */
enum bpf_reg_type {
NOT_INIT = 0, /* nothing was written into register */
UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
FRAME_PTR, /* reg == frame_pointer */
PTR_TO_STACK, /* reg == frame_pointer + imm */
CONST_IMM, /* constant integer value */
/* PTR_TO_PACKET represents:
* skb->data
* skb->data + imm
* skb->data + (u16) var
* skb->data + (u16) var + imm
* if (range > 0) then [ptr, ptr + range - off) is safe to access
* if (id > 0) means that some 'var' was added
* if (off > 0) means that 'imm' was added
*/
PTR_TO_PACKET,
PTR_TO_PACKET_END, /* skb->data + headlen */
};
struct bpf_prog;
struct bpf_verifier_ops {
@ -120,7 +145,8 @@ struct bpf_verifier_ops {
/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
enum bpf_reg_type *reg_type);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
@ -238,6 +264,10 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
{
}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
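With enum bpf_reg_type now exported from this header, the is_valid_access() callback gains a fourth argument through which a program type can tell the verifier what kind of value a context load yields (the verifier.c and bpf_trace.c hunks further down adopt the new signature). A hypothetical callback under the new prototype might look like the sketch below; struct example_ctx and its fields are made up for illustration:

	static bool example_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    enum bpf_reg_type *reg_type)
	{
		/* only aligned 4-byte reads inside the context are allowed */
		if (type != BPF_READ)
			return false;
		if (off < 0 || off + size > sizeof(struct example_ctx))
			return false;
		if (size != sizeof(__u32) || off % size != 0)
			return false;

		/* tell the verifier which loads produce packet pointers */
		if (off == offsetof(struct example_ctx, data))
			*reg_type = PTR_TO_PACKET;
		else if (off == offsetof(struct example_ctx, data_end))
			*reg_type = PTR_TO_PACKET_END;

		return true;
	}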


@ -52,6 +52,12 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
struct user_namespace *user_ns);
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
#endif /* _INET_DIAG_H_ */

View File

@ -466,6 +466,7 @@ enum {
enum {
MLX4_INTERFACE_STATE_UP = 1 << 0,
MLX4_INTERFACE_STATE_DELETION = 1 << 1,
MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
};
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \

View File

@ -251,7 +251,8 @@ do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
net_ratelimit()) \
__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \

View File

@ -49,6 +49,7 @@ struct qed_start_vport_params {
bool drop_ttl0;
u8 vport_id;
u16 mtu;
bool clear_stats;
};
struct qed_stop_rxq_params {


@ -36,6 +36,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
{
switch (sk->sk_family) {
case AF_INET:
if (sk->sk_type == SOCK_RAW)
return SKNLGRP_NONE;
switch (sk->sk_protocol) {
case IPPROTO_TCP:
return SKNLGRP_INET_TCP_DESTROY;
@ -45,6 +48,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
return SKNLGRP_NONE;
}
case AF_INET6:
if (sk->sk_type == SOCK_RAW)
return SKNLGRP_NONE;
switch (sk->sk_protocol) {
case IPPROTO_TCP:
return SKNLGRP_INET6_TCP_DESTROY;


@ -26,7 +26,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
u8 name_assign_type);
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto);
bool *csum_err, __be16 proto, int nhs);
static inline int gre_calc_hlen(__be16 o_flags)
{


@ -167,6 +167,7 @@ struct nft_set_elem {
struct nft_set;
struct nft_set_iter {
u8 genmask;
unsigned int count;
unsigned int skip;
int err;


@ -36,7 +36,7 @@ struct tcf_meta_ops {
int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
int (*decode)(struct sk_buff *, void *, u16 len);
int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
int (*alloc)(struct tcf_meta_info *, void *);
int (*alloc)(struct tcf_meta_info *, void *, gfp_t);
void (*release)(struct tcf_meta_info *);
int (*validate)(void *val, int len);
struct module *owner;
@ -48,8 +48,8 @@ int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
const void *dval);
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval);
int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval);
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
int ife_validate_meta_u32(void *val, int len);


@ -33,6 +33,7 @@ header-y += xt_NFLOG.h
header-y += xt_NFQUEUE.h
header-y += xt_RATEEST.h
header-y += xt_SECMARK.h
header-y += xt_SYNPROXY.h
header-y += xt_TCPMSS.h
header-y += xt_TCPOPTSTRIP.h
header-y += xt_TEE.h


@ -1,6 +1,8 @@
#ifndef _XT_SYNPROXY_H
#define _XT_SYNPROXY_H
#include <linux/types.h>
#define XT_SYNPROXY_OPT_MSS 0x01
#define XT_SYNPROXY_OPT_WSCALE 0x02
#define XT_SYNPROXY_OPT_SACK_PERM 0x04


@ -126,31 +126,6 @@
* are set to NOT_INIT to indicate that they are no longer readable.
*/
/* types of values stored in eBPF registers */
enum bpf_reg_type {
NOT_INIT = 0, /* nothing was written into register */
UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
FRAME_PTR, /* reg == frame_pointer */
PTR_TO_STACK, /* reg == frame_pointer + imm */
CONST_IMM, /* constant integer value */
/* PTR_TO_PACKET represents:
* skb->data
* skb->data + imm
* skb->data + (u16) var
* skb->data + (u16) var + imm
* if (range > 0) then [ptr, ptr + range - off) is safe to access
* if (id > 0) means that some 'var' was added
* if (off > 0) means that 'imm' was added
*/
PTR_TO_PACKET,
PTR_TO_PACKET_END, /* skb->data + headlen */
};
struct reg_state {
enum bpf_reg_type type;
union {
@ -695,10 +670,10 @@ static int check_packet_access(struct verifier_env *env, u32 regno, int off,
/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
enum bpf_access_type t)
enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
if (env->prog->aux->ops->is_valid_access &&
env->prog->aux->ops->is_valid_access(off, size, t)) {
env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
@ -798,21 +773,19 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
mark_reg_unknown_value(state->regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = UNKNOWN_VALUE;
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose("R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
err = check_ctx_access(env, off, size, t);
err = check_ctx_access(env, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
mark_reg_unknown_value(state->regs, value_regno);
if (off == offsetof(struct __sk_buff, data) &&
env->allow_ptr_leaks)
if (env->allow_ptr_leaks)
/* note that reg.[id|off|range] == 0 */
state->regs[value_regno].type = PTR_TO_PACKET;
else if (off == offsetof(struct __sk_buff, data_end) &&
env->allow_ptr_leaks)
state->regs[value_regno].type = PTR_TO_PACKET_END;
state->regs[value_regno].type = reg_type;
}
} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {


@ -7529,7 +7529,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
prog = event->tp_event->prog;
if (prog) {
event->tp_event->prog = NULL;
bpf_prog_put(prog);
bpf_prog_put_rcu(prog);
}
}


@ -209,6 +209,10 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
event->pmu->count)
return -EINVAL;
if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
event->attr.type != PERF_TYPE_RAW))
return -EINVAL;
/*
* we don't know if the function is run successfully by the
* return value. It can be judged in other places, such as
@ -349,7 +353,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
enum bpf_reg_type *reg_type)
{
/* check bounds */
if (off < 0 || off >= sizeof(struct pt_regs))
@ -427,7 +432,8 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
}
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
enum bpf_reg_type *reg_type)
{
if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
return false;


@ -976,7 +976,8 @@ static int ax25_release(struct socket *sock)
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
ax25_destroy_socket(ax25);
if (!sock_flag(ax25->sk, SOCK_DESTROY))
ax25_destroy_socket(ax25);
break;
case AX25_STATE_3:


@ -102,6 +102,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
switch (ax25->state) {
case AX25_STATE_0:
case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@ -111,6 +112,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
/* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
@ -213,7 +215,8 @@ void ax25_ds_t1_timeout(ax25_cb *ax25)
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, ETIMEDOUT);
if (!sock_flag(ax25->sk, SOCK_DESTROY))
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;


@ -38,6 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
switch (ax25->state) {
case AX25_STATE_0:
case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@ -47,6 +48,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
/* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
@ -144,7 +146,8 @@ void ax25_std_t1timer_expiry(ax25_cb *ax25)
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, ETIMEDOUT);
if (!sock_flag(ax25->sk, SOCK_DESTROY))
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;


@ -264,7 +264,8 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
{
ax25_clear_queues(ax25);
ax25_stop_heartbeat(ax25);
if (!sock_flag(ax25->sk, SOCK_DESTROY))
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);


@ -374,6 +374,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
ethhdr = eth_hdr(skb);
icmph = (struct batadv_icmp_header *)skb->data;
icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)


@ -1033,7 +1033,9 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
struct list_head *head)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_hard_iface *hard_iface;
struct batadv_softif_vlan *vlan;
list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface == soft_iface)
@ -1041,6 +1043,13 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
BATADV_IF_CLEANUP_KEEP);
}
/* destroy the "untagged" VLAN */
vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
if (vlan) {
batadv_softif_destroy_vlan(bat_priv, vlan);
batadv_softif_vlan_put(vlan);
}
batadv_sysfs_del_meshif(soft_iface);
unregister_netdevice_queue(soft_iface, head);
}


@ -650,8 +650,10 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
addr, BATADV_PRINT_VID(vid))) {
if (!vlan) {
net_ratelimited_function(batadv_info, soft_iface,
"adding TT local entry %pM to non-existent VLAN %d\n",
addr, BATADV_PRINT_VID(vid));
kfree(tt_local);
tt_local = NULL;
goto out;
@ -691,7 +693,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
batadv_tt_local_entry_put(tt_local);
batadv_softif_vlan_put(vlan);
goto out;
}
@ -2269,6 +2270,29 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
return crc;
}
/**
* batadv_tt_req_node_release - free tt_req node entry
* @ref: kref pointer of the tt req_node entry
*/
static void batadv_tt_req_node_release(struct kref *ref)
{
struct batadv_tt_req_node *tt_req_node;
tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
kfree(tt_req_node);
}
/**
* batadv_tt_req_node_put - decrement the tt_req_node refcounter and
* possibly release it
* @tt_req_node: tt_req_node to be free'd
*/
static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
{
kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
}
static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_req_node *node;
@ -2278,7 +2302,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
hlist_del_init(&node->list);
kfree(node);
batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
@ -2315,7 +2339,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
if (batadv_has_timed_out(node->issued_at,
BATADV_TT_REQUEST_TIMEOUT)) {
hlist_del_init(&node->list);
kfree(node);
batadv_tt_req_node_put(node);
}
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
@ -2347,9 +2371,11 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
if (!tt_req_node)
goto unlock;
kref_init(&tt_req_node->refcount);
ether_addr_copy(tt_req_node->addr, orig_node->orig);
tt_req_node->issued_at = jiffies;
kref_get(&tt_req_node->refcount);
hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
unlock:
spin_unlock_bh(&bat_priv->tt.req_list_lock);
@ -2613,13 +2639,19 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
out:
if (primary_if)
batadv_hardif_put(primary_if);
if (ret && tt_req_node) {
spin_lock_bh(&bat_priv->tt.req_list_lock);
/* hlist_del_init() verifies tt_req_node still is in the list */
hlist_del_init(&tt_req_node->list);
if (!hlist_unhashed(&tt_req_node->list)) {
hlist_del_init(&tt_req_node->list);
batadv_tt_req_node_put(tt_req_node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
kfree(tt_req_node);
}
if (tt_req_node)
batadv_tt_req_node_put(tt_req_node);
kfree(tvlv_tt_data);
return ret;
}
@ -3055,7 +3087,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
if (!batadv_compare_eth(node->addr, resp_src))
continue;
hlist_del_init(&node->list);
kfree(node);
batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
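
The translation-table hunks above replace bare kfree() of struct batadv_tt_req_node with kref-based reference counting: kref_init() gives the creator a reference, an extra kref_get() is taken before the node is linked into tt.req_list, and every hlist_del_init() is paired with batadv_tt_req_node_put(). Below is a minimal userspace sketch of that ownership pattern, using C11 atomics in place of the kernel's struct kref; all names are hypothetical and this is not the batman-adv code itself.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for an object managed with struct kref. */
struct node {
    atomic_int refcount;
    int payload;
};

static struct node *node_new(int payload)
{
    struct node *n = malloc(sizeof(*n));

    if (!n)
        return NULL;
    atomic_init(&n->refcount, 1);   /* creator's reference (kref_init) */
    n->payload = payload;
    return n;
}

static void node_get(struct node *n)
{
    atomic_fetch_add(&n->refcount, 1);  /* kref_get */
}

static void node_put(struct node *n)
{
    /* kref_put: free only when the last reference is dropped */
    if (atomic_fetch_sub(&n->refcount, 1) == 1)
        free(n);
}

int main(void)
{
    struct node *n = node_new(42);

    if (!n)
        return 1;
    node_get(n);    /* extra reference for the "list", as the diff does */
    /* ... list user finishes with the object ... */
    node_put(n);    /* list's reference (batadv_tt_req_node_put on del) */
    printf("payload still valid: %d\n", n->payload);
    node_put(n);    /* creator's reference; the object is freed here */
    return 0;
}

The point of the extra get before hlist_add_head() is that the list and the creator each hold their own reference, so whichever side drops last frees the object.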


@ -1137,11 +1137,13 @@ struct batadv_tt_change_node {
* struct batadv_tt_req_node - data to keep track of the tt requests in flight
* @addr: mac address of the originator this request was sent to
* @issued_at: timestamp used for purging stale tt requests
* @refcount: number of contexts the object is used by
* @list: list node for batadv_priv_tt::req_list
*/
struct batadv_tt_req_node {
u8 addr[ETH_ALEN];
unsigned long issued_at;
struct kref refcount;
struct hlist_node list;
};


@ -213,8 +213,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
static void __br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
u16 vid = 0;
@ -222,6 +221,14 @@ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
/* check if vlan is allowed, to avoid spoofing */
if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
}
/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
__br_handle_local_finish(skb);
BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
br_pass_frame_up(skb);
@ -274,7 +281,9 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
if (p->br->stp_enabled == BR_NO_STP ||
fwd_mask & (1u << dest[5]))
goto forward;
break;
*pskb = skb;
__br_handle_local_finish(skb);
return RX_HANDLER_PASS;
case 0x01: /* IEEE MAC (Pause) */
goto drop;
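
The br_input.c change factors the FDB-learning step out into __br_handle_local_finish() so that link-local STP frames can learn the source address and then be handed back to the stack with RX_HANDLER_PASS instead of being consumed by the bridge. A toy sketch of that shape follows; the frame struct and handler are hypothetical, not the bridge rx path.

#include <stdio.h>

/* Hypothetical frame record standing in for an skb. */
struct frame {
    unsigned int src;
    int is_stp;
};

/* Shared learning step, factored out like __br_handle_local_finish(). */
static void learn_source(const struct frame *f)
{
    printf("learned source %u\n", f->src);
}

/* Full local delivery path, like br_handle_local_finish() in the diff. */
static void handle_local(const struct frame *f)
{
    learn_source(f);
    printf("delivered frame locally\n");
}

/* rx handler: STP frames now learn the source and are passed back to the
 * stack instead of being swallowed, mirroring the RX_HANDLER_PASS change.
 */
static const char *rx_handler(const struct frame *f)
{
    if (f->is_stp) {
        learn_source(f);
        return "RX_HANDLER_PASS";   /* let the stack see the BPDU */
    }
    handle_local(f);
    return "RX_HANDLER_CONSUMED";
}

int main(void)
{
    struct frame stp  = { .src = 1, .is_stp = 1 };
    struct frame data = { .src = 2, .is_stp = 0 };

    printf("%s\n", rx_handler(&stp));
    printf("%s\n", rx_handler(&data));
    return 0;
}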


@ -464,8 +464,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
&ip6h->saddr)) {
kfree_skb(skb);
br->has_ipv6_addr = 0;
return NULL;
}
br->has_ipv6_addr = 1;
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
hopopt = (u8 *)(ip6h + 1);
@ -1745,6 +1748,7 @@ void br_multicast_init(struct net_bridge *br)
br->ip6_other_query.delay_time = 0;
br->ip6_querier.port = NULL;
#endif
br->has_ipv6_addr = 1;
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,


@ -1273,7 +1273,7 @@ static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
struct bridge_vlan_xstats vxi;
struct br_vlan_stats stats;
if (vl_idx++ < *prividx)
if (++vl_idx < *prividx)
continue;
memset(&vxi, 0, sizeof(vxi));
vxi.vid = v->vid;
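
The one-line br_netlink.c fix turns a post-increment into a pre-increment so the per-VLAN stats dump resumes at the right entry after a partial dump. The difference is easy to see in a standalone loop; the five-entry dump below is hypothetical, not the bridge code.

#include <stdio.h>

/* Count how many of the 5 entries get processed when resuming at prividx,
 * using the old (post-increment) and new (pre-increment) conditions.
 */
static int dump(int prividx, int pre_increment)
{
    int vl_idx = 0, processed = 0;

    for (int i = 0; i < 5; i++) {
        if (pre_increment) {
            if (++vl_idx < prividx)     /* new: ++vl_idx */
                continue;
        } else {
            if (vl_idx++ < prividx)     /* old: vl_idx++ */
                continue;
        }
        processed++;
    }
    return processed;
}

int main(void)
{
    /* Resume after the first entries were already sent in a prior pass. */
    printf("old condition processes %d entries\n", dump(2, 0)); /* 3 */
    printf("new condition processes %d entries\n", dump(2, 1)); /* 4 */
    return 0;
}

With the post-increment form the resume point is off by one relative to the saved counter, so an entry can be missed when the dump continues.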


@ -314,6 +314,7 @@ struct net_bridge
u8 multicast_disabled:1;
u8 multicast_querier:1;
u8 multicast_query_use_ifaddr:1;
u8 has_ipv6_addr:1;
u32 hash_elasticity;
u32 hash_max;
@ -588,10 +589,22 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
static inline bool
__br_multicast_querier_exists(struct net_bridge *br,
struct bridge_mcast_other_query *querier)
struct bridge_mcast_other_query *querier,
const bool is_ipv6)
{
bool own_querier_enabled;
if (br->multicast_querier) {
if (is_ipv6 && !br->has_ipv6_addr)
own_querier_enabled = false;
else
own_querier_enabled = true;
} else {
own_querier_enabled = false;
}
return time_is_before_jiffies(querier->delay_time) &&
(br->multicast_querier || timer_pending(&querier->timer));
(own_querier_enabled || timer_pending(&querier->timer));
}
static inline bool br_multicast_querier_exists(struct net_bridge *br,
@ -599,10 +612,12 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
{
switch (eth->h_proto) {
case (htons(ETH_P_IP)):
return __br_multicast_querier_exists(br, &br->ip4_other_query);
return __br_multicast_querier_exists(br,
&br->ip4_other_query, false);
#if IS_ENABLED(CONFIG_IPV6)
case (htons(ETH_P_IPV6)):
return __br_multicast_querier_exists(br, &br->ip6_other_query);
return __br_multicast_querier_exists(br,
&br->ip6_other_query, true);
#endif
default:
return false;


@ -2085,7 +2085,8 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
}
static bool sk_filter_is_valid_access(int off, int size,
enum bpf_access_type type)
enum bpf_access_type type,
enum bpf_reg_type *reg_type)
{
switch (off) {
case offsetof(struct __sk_buff, tc_classid):
@ -2108,7 +2109,8 @@ static bool sk_filter_is_valid_access(int off, int size,
}
static bool tc_cls_act_is_valid_access(int off, int size,
enum bpf_access_type type)
enum bpf_access_type type,
enum bpf_reg_type *reg_type)
{
if (type == BPF_WRITE) {
switch (off) {
@ -2123,6 +2125,16 @@ static bool tc_cls_act_is_valid_access(int off, int size,
return false;
}
}
switch (off) {
case offsetof(struct __sk_buff, data):
*reg_type = PTR_TO_PACKET;
break;
case offsetof(struct __sk_buff, data_end):
*reg_type = PTR_TO_PACKET_END;
break;
}
return __is_valid_access(off, size, type);
}
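
The filter.c hunks extend the is_valid_access callbacks with an enum bpf_reg_type out-parameter so the verifier knows that loads of __sk_buff::data and data_end produce packet pointers rather than plain scalars. The offsetof-switch plus out-parameter shape looks like this in plain C; the context struct and enum below are hypothetical stand-ins, not the verifier API.

#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical context struct and pointer-type enum, standing in for
 * struct __sk_buff and enum bpf_reg_type.
 */
struct ctx {
    unsigned int len;
    unsigned int data;
    unsigned int data_end;
};

enum reg_type { SCALAR_VALUE, PTR_TO_PKT, PTR_TO_PKT_END };

static bool is_valid_access(size_t off, enum reg_type *reg_type)
{
    if (off >= sizeof(struct ctx))
        return false;

    switch (off) {
    case offsetof(struct ctx, data):
        *reg_type = PTR_TO_PKT;         /* load yields a packet pointer */
        break;
    case offsetof(struct ctx, data_end):
        *reg_type = PTR_TO_PKT_END;     /* load yields the packet end */
        break;
    }
    return true;
}

int main(void)
{
    enum reg_type t = SCALAR_VALUE;

    if (is_valid_access(offsetof(struct ctx, data), &t))
        printf("data access ok, reg_type=%d\n", t);
    return 0;
}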


@ -2469,13 +2469,17 @@ int neigh_xmit(int index, struct net_device *dev,
tbl = neigh_tables[index];
if (!tbl)
goto out;
rcu_read_lock_bh();
neigh = __neigh_lookup_noref(tbl, addr, dev);
if (!neigh)
neigh = __neigh_create(tbl, addr, dev, false);
err = PTR_ERR(neigh);
if (IS_ERR(neigh))
if (IS_ERR(neigh)) {
rcu_read_unlock_bh();
goto out_kfree_skb;
}
err = neigh->output(neigh, skb);
rcu_read_unlock_bh();
}
else if (index == NEIGH_LINK_TABLE) {
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
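
The neigh_xmit() fix brackets the __neigh_lookup_noref()/output() sequence with rcu_read_lock_bh()/rcu_read_unlock_bh() and takes care to drop the lock on the error path as well as the success path. As a loose analogy only, here is the same "release on every exit" discipline with an ordinary mutex in userspace C; the table and names are hypothetical, and a mutex is not RCU, this just shows the control flow.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table_entry = -1;    /* -1 means "no entry", i.e. lookup failure */

static int transmit(int key)
{
    int err;

    pthread_mutex_lock(&table_lock);        /* rcu_read_lock_bh() in the diff */

    if (table_entry != key) {
        err = -1;
        pthread_mutex_unlock(&table_lock);  /* unlock on the error path too */
        goto out_drop;
    }

    err = 0;                                /* neigh->output(neigh, skb) */
    pthread_mutex_unlock(&table_lock);      /* rcu_read_unlock_bh() after output */
    return err;

out_drop:
    fprintf(stderr, "lookup failed, dropping\n");
    return err;
}

int main(void)
{
    table_entry = 7;
    printf("hit:  %d\n", transmit(7));
    printf("miss: %d\n", transmit(3));
    return 0;
}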


@ -23,6 +23,11 @@ struct esp_skb_cb {
void *tmp;
};
struct esp_output_extra {
__be32 seqhi;
u32 esphoff;
};
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
@ -35,11 +40,11 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
*
* TODO: Use spare space in skb for this where possible.
*/
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
unsigned int len;
len = seqhilen;
len = extralen;
len += crypto_aead_ivsize(aead);
@ -57,15 +62,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
return kmalloc(len, GFP_ATOMIC);
}
static inline __be32 *esp_tmp_seqhi(void *tmp)
static inline void *esp_tmp_extra(void *tmp)
{
return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
return crypto_aead_ivsize(aead) ?
PTR_ALIGN((u8 *)tmp + seqhilen,
crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
PTR_ALIGN((u8 *)tmp + extralen,
crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
@ -99,7 +105,7 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
struct ip_esp_hdr *esph = (void *)(skb->data + offset);
void *tmp = ESP_SKB_CB(skb)->tmp;
__be32 *seqhi = esp_tmp_seqhi(tmp);
__be32 *seqhi = esp_tmp_extra(tmp);
esph->seq_no = esph->spi;
esph->spi = *seqhi;
@ -107,7 +113,11 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
static void esp_output_restore_header(struct sk_buff *skb)
{
esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
void *tmp = ESP_SKB_CB(skb)->tmp;
struct esp_output_extra *extra = esp_tmp_extra(tmp);
esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
sizeof(__be32));
}
static void esp_output_done_esn(struct crypto_async_request *base, int err)
@ -121,6 +131,7 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
struct esp_output_extra *extra;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct aead_request *req;
@ -137,8 +148,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
int tfclen;
int nfrags;
int assoclen;
int seqhilen;
__be32 *seqhi;
int extralen;
__be64 seqno;
/* skb is pure payload to encrypt */
@ -166,21 +176,21 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
nfrags = err;
assoclen = sizeof(*esph);
seqhilen = 0;
extralen = 0;
if (x->props.flags & XFRM_STATE_ESN) {
seqhilen += sizeof(__be32);
assoclen += seqhilen;
extralen += sizeof(*extra);
assoclen += sizeof(__be32);
}
tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
tmp = esp_alloc_tmp(aead, nfrags, extralen);
if (!tmp) {
err = -ENOMEM;
goto error;
}
seqhi = esp_tmp_seqhi(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen);
extra = esp_tmp_extra(tmp);
iv = esp_tmp_iv(aead, tmp, extralen);
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
@ -247,8 +257,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
* encryption.
*/
if ((x->props.flags & XFRM_STATE_ESN)) {
esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
*seqhi = esph->spi;
extra->esphoff = (unsigned char *)esph -
skb_transport_header(skb);
esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
extra->seqhi = esph->spi;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
aead_request_set_callback(req, 0, esp_output_done_esn, skb);
}
@ -445,7 +457,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
ESP_SKB_CB(skb)->tmp = tmp;
seqhi = esp_tmp_seqhi(tmp);
seqhi = esp_tmp_extra(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen);
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
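
The esp4.c rework replaces the single seqhi word in the per-packet scratch allocation with a struct esp_output_extra (high sequence bits plus the saved ESP header offset) and derives the IV, request and SG pointers from it with PTR_ALIGN arithmetic. Below is a standalone sketch of carving aligned sub-objects out of one allocation, simplified to extra + IV with hypothetical sizes; it is not the ESP code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct esp_output_extra. */
struct extra {
    uint32_t seqhi;
    uint32_t esphoff;
};

/* Round ptr up to the next multiple of align (a power of two),
 * mirroring the kernel's PTR_ALIGN().
 */
static void *ptr_align(void *ptr, uintptr_t align)
{
    return (void *)(((uintptr_t)ptr + align - 1) & ~(align - 1));
}

int main(void)
{
    size_t extralen = sizeof(struct extra);
    size_t ivsize   = 16;   /* hypothetical AEAD IV size */
    size_t iv_align = 8;    /* hypothetical alignment requirement */

    /* One allocation, large enough for the worst-case padding. */
    size_t len = extralen + ivsize + iv_align - 1 + _Alignof(struct extra) - 1;
    void *tmp = malloc(len);

    if (!tmp)
        return 1;

    struct extra *extra = ptr_align(tmp, _Alignof(struct extra));
    uint8_t *iv = ptr_align((uint8_t *)extra + extralen, iv_align);

    extra->seqhi = 0x12345678;  /* high 32 bits of the ESN would go here */
    printf("tmp=%p extra=%p iv=%p\n", tmp, (void *)extra, (void *)iv);

    free(tmp);
    return 0;
}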


@ -62,26 +62,26 @@ EXPORT_SYMBOL_GPL(gre_del_protocol);
/* Fills in tpi and returns header length to be pulled. */
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto)
bool *csum_err, __be16 proto, int nhs)
{
const struct gre_base_hdr *greh;
__be32 *options;
int hdr_len;
if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
return -EINVAL;
greh = (struct gre_base_hdr *)skb_transport_header(skb);
greh = (struct gre_base_hdr *)(skb->data + nhs);
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags);
hdr_len = gre_calc_hlen(tpi->flags);
if (!pskb_may_pull(skb, hdr_len))
if (!pskb_may_pull(skb, nhs + hdr_len))
return -EINVAL;
greh = (struct gre_base_hdr *)skb_transport_header(skb);
greh = (struct gre_base_hdr *)(skb->data + nhs);
tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1);
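
gre_parse_header() now takes the offset of the GRE header (nhs) and pskb_may_pull()s that many extra bytes before recomputing the header pointer from skb->data, instead of trusting skb_transport_header() on the error path. The underlying rule, check the length before casting a header out of a buffer, in a standalone sketch with a hypothetical header and packet, not the GRE code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical fixed-size base header, standing in for struct gre_base_hdr. */
struct base_hdr {
    uint16_t flags;
    uint16_t protocol;
};

/* Parse a header located nhs bytes into the buffer; return the header
 * length, or -1 if the buffer is too short (the equivalent of
 * pskb_may_pull() failing).
 */
static int parse_header(const uint8_t *buf, size_t buflen, size_t nhs,
                        struct base_hdr *out)
{
    if (buflen < nhs + sizeof(struct base_hdr))
        return -1;
    memcpy(out, buf + nhs, sizeof(*out));   /* safe: length checked above */
    return (int)sizeof(struct base_hdr);
}

int main(void)
{
    uint8_t pkt[64] = { 0 };
    struct base_hdr hdr;

    /* e.g. nhs = 20 for a header that follows a minimal IPv4 header */
    if (parse_header(pkt, sizeof(pkt), 20, &hdr) < 0)
        fprintf(stderr, "packet too short\n");
    else
        printf("parsed header, proto=0x%04x\n", hdr.protocol);
    return 0;
}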


@ -49,12 +49,6 @@
#include <net/gre.h>
#include <net/dst_metadata.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/*
Problems & solutions
--------------------
@ -217,12 +211,14 @@ static void gre_err(struct sk_buff *skb, u32 info)
* by themselves???
*/
const struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct tnl_ptk_info tpi;
bool csum_err = false;
if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
iph->ihl * 4) < 0) {
if (!csum_err) /* ignore csum errors. */
return;
}
@ -338,7 +334,7 @@ static int gre_rcv(struct sk_buff *skb)
}
#endif
hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
if (hdr_len < 0)
goto drop;
@ -1121,6 +1117,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
LIST_HEAD(list_kill);
struct ip_tunnel *t;
int err;
@ -1136,8 +1133,10 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
t->collect_md = true;
err = ipgre_newlink(net, dev, tb, NULL);
if (err < 0)
goto out;
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
}
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
@ -1146,9 +1145,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
if (err)
goto out;
err = rtnl_configure_link(dev, NULL);
if (err < 0)
goto out;
return dev;
out:
free_netdev(dev);
ip_tunnel_dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);


@ -127,7 +127,9 @@ __be32 ic_myaddr = NONE; /* My IP address */
static __be32 ic_netmask = NONE; /* Netmask for local subnet */
__be32 ic_gateway = NONE; /* Gateway IP address */
__be32 ic_addrservaddr = NONE; /* IP address of the IP addresses' server */
#ifdef IPCONFIG_DYNAMIC
static __be32 ic_addrservaddr = NONE; /* IP address of the IP addresses' server */
#endif
__be32 ic_servaddr = NONE; /* Boot server IP address */


@ -891,8 +891,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (c)
if (c) {
c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXVIFS;
}
return c;
}
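
The ipmr.c hunk back-dates mfc_un.res.last_assert at allocation time so the very first assert is not silently suppressed for MFC_ASSERT_THRESH after the cache entry is created. A small demonstration of why initializing the timestamp to "now - threshold - 1" makes the first time_after()-style check fire; the tick values are hypothetical and plain unsigned arithmetic stands in for jiffies.

#include <stdio.h>

#define ASSERT_THRESH 3000UL    /* hypothetical threshold, in ticks */

/* time_after()-style comparison that is safe across wrap-around. */
static int time_after(unsigned long a, unsigned long b)
{
    return (long)(b - a) < 0;
}

int main(void)
{
    unsigned long now = 100;    /* pretend the counter is small after boot */

    /* Zero-initialized: the first check is wrongly suppressed. */
    unsigned long last_assert_zero = 0;
    /* Diff's initialization: already "expired" at creation time. */
    unsigned long last_assert_init = now - ASSERT_THRESH - 1;

    printf("zero-init fires:  %d\n",
           time_after(now, last_assert_zero + ASSERT_THRESH));  /* 0 */
    printf("back-dated fires: %d\n",
           time_after(now, last_assert_init + ASSERT_THRESH));  /* 1 */
    return 0;
}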


@ -2751,7 +2751,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
struct sk_buff *hole = NULL;
u32 last_lost;
u32 max_segs, last_lost;
int mib_idx;
int fwd_rexmitting = 0;
@ -2771,6 +2771,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
last_lost = tp->snd_una;
}
max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
tcp_for_write_queue_from(skb, sk) {
__u8 sacked = TCP_SKB_CB(skb)->sacked;
int segs;
@ -2784,6 +2785,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
if (segs <= 0)
return;
/* In case tcp_shift_skb_data() has aggregated large skbs,
* we need to make sure we do not send too big TSO packets
*/
segs = min_t(int, segs, max_segs);
if (fwd_rexmitting) {
begin_fwd:


@ -391,9 +391,9 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
static inline int compute_score(struct sock *sk, struct net *net,
__be32 saddr, unsigned short hnum, __be16 sport,
__be32 daddr, __be16 dport, int dif)
static int compute_score(struct sock *sk, struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum, int dif)
{
int score;
struct inet_sock *inet;
@ -434,52 +434,6 @@ static inline int compute_score(struct sock *sk, struct net *net,
return score;
}
/*
* In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
*/
static inline int compute_score2(struct sock *sk, struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
ipv6_only_sock(sk))
return -1;
inet = inet_sk(sk);
if (inet->inet_rcv_saddr != daddr ||
inet->inet_num != hnum)
return -1;
score = (sk->sk_family == PF_INET) ? 2 : 1;
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
score += 4;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score += 4;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
@ -492,11 +446,11 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
udp_ehash_secret + net_hash_mix(net));
}
/* called with read_rcu_lock() */
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif,
struct udp_hslot *hslot2, unsigned int slot2,
struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
@ -506,7 +460,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
result = NULL;
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
@ -554,17 +508,22 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
hslot2, slot2, skb);
hslot2, skb);
if (!result) {
unsigned int old_slot2 = slot2;
hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
/* avoid searching the same slot again. */
if (unlikely(slot2 == old_slot2))
return result;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
htonl(INADDR_ANY), hnum, dif,
hslot2, slot2, skb);
daddr, hnum, dif,
hslot2, skb);
}
return result;
}
@ -572,8 +531,8 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
result = NULL;
badness = 0;
sk_for_each_rcu(sk, &hslot->head) {
score = compute_score(sk, net, saddr, hnum, sport,
daddr, dport, dif);
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@ -1755,8 +1714,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
return err;
}
return skb_checksum_init_zero_check(skb, proto, uh->check,
inet_compute_pseudo);
/* Note, we are only interested in != 0 or == 0, thus the
* force to int.
*/
return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
inet_compute_pseudo);
}
/*

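The udp.c rework above deletes compute_score2() and routes both the hash2 lookup and the fallback slot walk through a single compute_score() with a consistent argument order (saddr/sport, daddr/hnum, dif), which is what the reuseport hashing had been getting wrong. The surrounding "walk the candidates, keep the best-scoring socket" loop has roughly the shape sketched below; the socket record and scoring rules are hypothetical, not the kernel's.

#include <stdio.h>

/* Hypothetical listening-socket record. */
struct sock {
    unsigned int local_addr, local_port;
    unsigned int remote_addr;   /* 0 = wildcard (unconnected) */
};

/* Return -1 for "does not match", otherwise a score where more specific
 * matches score higher, loosely mirroring compute_score().
 */
static int compute_score(const struct sock *sk,
                         unsigned int saddr, unsigned int daddr,
                         unsigned int dport)
{
    int score = 1;

    if (sk->local_addr != daddr || sk->local_port != dport)
        return -1;
    if (sk->remote_addr) {
        if (sk->remote_addr != saddr)
            return -1;
        score += 4;     /* a connected socket beats a wildcard one */
    }
    return score;
}

int main(void)
{
    struct sock socks[] = {
        { .local_addr = 10, .local_port = 53, .remote_addr = 0 },
        { .local_addr = 10, .local_port = 53, .remote_addr = 99 },
    };
    const struct sock *result = NULL;
    int badness = 0;

    for (unsigned int i = 0; i < 2; i++) {
        int score = compute_score(&socks[i], 99, 10, 53);

        if (score > badness) {
            result = &socks[i];
            badness = score;
        }
    }
    printf("best match: socket %ld (score %d)\n",
           (long)(result ? result - socks : -1), badness);
    return 0;
}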