Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Ensure that mtu is at least IPV6_MIN_MTU in ipv6 VTI tunnel driver,
    from Steffen Klassert.

 2) Fix crashes when user tries to get_next_key on an LPM bpf map, from
    Alexei Starovoitov.

 3) Fix detection of VLAN filtering feature for bnx2x VF devices, from
    Michal Schmidt.

 4) We can get a divide by zero when TCP sockets are morphed into
    listening state, fix from Eric Dumazet.

 5) Fix socket refcounting bugs in skb_complete_wifi_ack() and
    skb_complete_tx_timestamp(), from Eric Dumazet.

 6) Use after free in dccp_feat_activate_values(), also from Eric
    Dumazet.

 7) Like bonding, the team driver needs to use ETH_MAX_MTU as
    netdev->max_mtu, from Jarod Wilson.

 8) Fix use after free in vrf_xmit(), from David Ahern.

 9) Don't do UDP Fragmentation Offload on IPComp ipsec packets, from
    Alexey Kodanev.

10) Properly check napi_complete_done() return value in order to decide
    whether to re-enable IRQs or not in amd-xgbe driver, from Thomas
    Lendacky.

11) Fix double free of hwmon device in marvell phy driver, from Andrew
    Lunn.

12) Don't crash on malformed netlink attributes in act_connmark, from
    Etienne Noss.

13) Don't remove routes with a higher metric in ipv6 ECMP route
    replace, from Sabrina Dubroca.

14) Don't write into a cloned SKB in ipv6 fragmentation handling, from
    Florian Westphal.

15) Fix routing redirect races in dccp and tcp: basically the ICMP
    handler can't modify the socket's cached route while it's locked by
    the user at this moment. From Jon Maxwell.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (108 commits)
  qed: Enable iSCSI Out-of-Order
  qed: Correct out-of-bound access in OOO history
  qed: Fix interrupt flags on Rx LL2
  qed: Free previous connections when releasing iSCSI
  qed: Fix mapping leak on LL2 rx flow
  qed: Prevent creation of too-big u32-chains
  qed: Align CIDs according to DORQ requirement
  mlxsw: reg: Fix SPVMLR max record count
  mlxsw: reg: Fix SPVM max record count
  net: Resend IGMP memberships upon peer notification.
  dccp: fix memory leak during tear-down of unsuccessful connection request
  tun: fix premature POLLOUT notification on tun devices
  dccp/tcp: fix routing redirect race
  ucc/hdlc: fix two little issue
  vxlan: fix ovs support
  net: use net->count to check whether a netns is alive or not
  bridge: drop netfilter fake rtable unconditionally
  ipv6: avoid write to a possibly cloned skb
  net: wimax/i2400m: fix NULL-deref at probe
  isdn/gigaset: fix NULL-deref at probe
  ...

commit ae50dfd616

Changed files (one entry per file, summarised from the diff hunks; the file
paths are inferred from the hunk contents):

  - Documentation/devicetree/bindings/powerpc/4xx/emac.txt, hunks at old lines
    71, 81, 104 and 145: document the optional phy-handle property (see
    Documentation/devicetree/bindings/net/ethernet.txt), the fixed-link
    subnode (see Documentation/devicetree/bindings/net/fixed-link.txt) and the
    mdio subnode (with #address-cells = <1> and #size-cells = <0>) for
    "ibm,emac" nodes, and add an EMAC1 example node compatible with
    "ibm,emac-apm821xx" / "ibm,emac4sync" that uses phy-mode "rgmii", a TAH
    channel, the new has-inverted-stacr-oc and has-new-stacr-staopc flags, and
    an ethernet-phy@0 child ("ethernet-phy-ieee802.3-c22") under its mdio
    node.
  - Documentation/networking/ip-sysctl.txt, hunk at old line 1006: the
    "forwarding - BOOLEAN" entry now reads "Enable IP forwarding on this
    interface. This controls whether packets received _on_ this interface can
    be forwarded."
  - crypto/af_alg.c, hunks at old lines 266, 281 and 323: af_alg_accept() and
    alg_accept() gain a "bool kern" parameter, which is passed through to
    sk_alloc() instead of a hard-coded 0.
  - crypto/algif_hash.c, hunks at old lines 239, 260, 378 and 386:
    hash_accept() and hash_accept_nokey() gain the same "bool kern" parameter
    and forward it to af_alg_accept() (the pattern is sketched just below).
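The two crypto entries above share one pattern: the socket accept operation
now carries a "kern" flag that must reach sk_alloc(). A minimal sketch of such
an accept callback follows; demo_accept(), demo_proto and the simplified error
handling are illustrative assumptions, not code from this merge.

    #include <linux/module.h>
    #include <linux/net.h>
    #include <net/sock.h>

    static struct proto demo_proto = {
            .name     = "DEMO",
            .owner    = THIS_MODULE,
            .obj_size = sizeof(struct sock),
    };

    /* Hypothetical accept op: the trailing 'kern' argument (the parameter
     * added in this series) is forwarded to sk_alloc() so kernel-internal
     * sockets are created and accounted as such. */
    static int demo_accept(struct socket *sock, struct socket *newsock,
                           int flags, bool kern)
    {
            struct sock *sk2;

            sk2 = sk_alloc(sock_net(sock->sk), PF_ALG, GFP_KERNEL,
                           &demo_proto, kern);
            if (!sk2)
                    return -ENOMEM;

            sock_init_data(newsock, sk2);   /* ties the new sock to newsock */
            return 0;
    }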
  - drivers/isdn/gigaset/bas-gigaset.c, hunk at old line 2317: gigaset_probe()
    returns -ENODEV when hostif->desc.bNumEndpoints < 1, before announcing the
    device (the "isdn/gigaset: fix NULL-deref at probe" change).
  - drivers/net/ethernet/amd/xgbe/xgbe-drv.c, hunks at old lines 2272 and
    2317: xgbe_one_poll() and xgbe_all_poll() re-enable Tx/Rx interrupts only
    when "(processed < budget) && napi_complete_done(napi, processed)" holds,
    instead of unconditionally after calling napi_complete_done() (the shape
    of the fix is sketched just below).
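This is the change called out as fix (10) in the pull message above. A generic
sketch of the resulting poll routine follows; struct demo_channel and the
demo_* helpers are placeholders, not the xgbe driver's real symbols.

    #include <linux/netdevice.h>

    struct demo_channel {
            struct napi_struct napi;
            /* ... ring state ... */
    };

    int demo_rx_poll(struct demo_channel *ch, int budget);   /* driver specific */
    void demo_enable_rx_tx_irqs(struct demo_channel *ch);    /* driver specific */

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            struct demo_channel *ch =
                    container_of(napi, struct demo_channel, napi);
            int processed = demo_rx_poll(ch, budget);

            /* Re-arm interrupts only when NAPI really completed:
             * napi_complete_done() can return false (e.g. under busy
             * polling) even though processed < budget, and the IRQ must
             * then stay masked. */
            if (processed < budget && napi_complete_done(napi, processed))
                    demo_enable_rx_tx_irqs(ch);

            return processed;
    }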
  - drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c, hunk at old line
    213: aq_pci_func_free_irqs() clears the IRQ affinity hint (when MSI-X is
    enabled) before calling free_irq().
  - drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c, hunks at old lines
    13292, 13738, 13797, 13809, 13824 and 13991: bnx2x_init_dev() no longer
    guesses VLAN-filtering support for VFs ("For VF we'll know whether to
    enable VLAN filtering after getting a response to CHANNEL_TLV_ACQUIRE from
    PF"); the PTP adjfreq/adjtime/gettime/settime callbacks return -ENETDOWN
    while the interface is down (adjfreq previously returned -EFAULT); and
    bnx2x_init_one() turns on NETIF_F_HW_VLAN_CTAG_FILTER for VFs once the
    acquire response advertises PFVF_CAP_VLAN_FILTER.
  - drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c, hunks at old lines 434,
    444, 469, 1899 and 1920: bnx2x_vf_mac_vlan_config() treats -EEXIST from
    bnx2x_config_vlan_mac() as success and marks filters as applied;
    bnx2x_vf_mac_vlan_config_list() only rolls back applied filters and
    reports the correct count; the per-VF stats-address debug prints are gated
    behind BNX2X_MSG_IOV | BNX2X_MSG_STATS.
  - drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h, hunk at old line 114:
    struct bnx2x_vf_mac_vlan_filter gains a "bool applied" member.
  - drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c, hunks at old lines 868,
    883, 890, 924 and 1778: bnx2x_vfpf_set_mcast() checks netdev_mc_count()
    against PFVF_MAX_MULTICAST_PER_VF before building the request (returning
    -EINVAL via the out path) instead of after the loop, and returns rc rather
    than always 0; bnx2x_vf_mbx_qfilters() also builds and applies a VLAN
    filter list (VFPF_VLAN_FILTER) when queue filters are set.
  - drivers/net/ethernet/broadcom/bnxt/bnxt.c, hunks at old lines 4465, 5507,
    6495, 7444 and 7554: bnxt_hwrm_func_qcfg() records BNXT_FLAG_FW_LLDP_AGENT
    when the firmware reports FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED;
    link_info->support_auto_speeds is only set when the response actually
    reports supported_speeds_auto_mode; bnxt_reset_task() brackets the
    close/open cycle with bnxt_ulp_stop()/bnxt_ulp_start(); and
    bnxt_hwrm_func_reset() is called earlier in bnxt_init_one().
  - drivers/net/ethernet/broadcom/bnxt/bnxt.h, hunk at old line 993: adds
    #define BNXT_FLAG_FW_LLDP_AGENT 0x80000.
  - drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c, hunk at old line 474:
    bnxt_dcb_init() only sets DCB_CAP_DCBX_HOST when the PF has no firmware
    LLDP agent (BNXT_FLAG_FW_LLDP_AGENT), otherwise DCB_CAP_DCBX_LLD_MANAGED.
  - drivers/net/ethernet/broadcom/genet/bcmgenet.c, hunks at old lines 1, 450,
    778, 821, 836, 973, 1167, 1219, 1248, 2457, 2483, 2511, 2531, 2552, 2561,
    2801, 3177, 3185, 3233, 3276, 3338 and 3502: copyright bumped to
    2014-2017; new ethtool .begin/.complete callbacks enable and disable the
    clock around ethtool operations; the rbuf overflow/error counters are read
    from version-specific offsets via a new bcmgenet_update_stat_misc()
    helper, and the MIB-counter switch is restructured with fall-throughs;
    __bcmgenet_tx_reclaim() no longer wakes the Tx queue itself;
    bcmgenet_tx_poll() reclaims under the ring lock and wakes the queue when
    enough descriptors are free; bcmgenet_isr0()/bcmgenet_isr1() work on a
    local status word, handle MDIO completion directly, and latch only the
    link/magic-packet bits into priv->irq0_stat under a new priv->lock
    spinlock for bcmgenet_irq_task() to consume (the hand-off is sketched just
    below); the invalid GPHY revision check moves ahead of the revision
    decode; the internal GPHY is powered up in probe before reset_umac() and
    powered down on the open/resume error paths; spin_lock_init(&priv->lock)
    is added in probe.
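The GENET interrupt rework above follows a common top-half/bottom-half
hand-off: read and clear the hardware status locally, latch only the bits the
work item needs under a spinlock, and let the work item consume them under the
same lock. A stripped-down sketch, with demo_* names standing in for the
driver's own symbols:

    #include <linux/bits.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    #define DEMO_IRQ_STAT   0x00
    #define DEMO_IRQ_CLEAR  0x04
    #define DEMO_IRQ_LINK   BIT(0)

    struct demo_priv {
            void __iomem *base;
            spinlock_t lock;          /* protects irq_stat */
            unsigned int irq_stat;    /* bits deferred to the work item */
            struct work_struct irq_work;
    };

    static irqreturn_t demo_isr(int irq, void *dev_id)
    {
            struct demo_priv *priv = dev_id;
            unsigned int status = readl(priv->base + DEMO_IRQ_STAT);
            unsigned long flags;

            writel(status, priv->base + DEMO_IRQ_CLEAR);   /* ack in hardware */

            if (status & DEMO_IRQ_LINK) {
                    spin_lock_irqsave(&priv->lock, flags);
                    priv->irq_stat |= status & DEMO_IRQ_LINK;
                    spin_unlock_irqrestore(&priv->lock, flags);
                    schedule_work(&priv->irq_work);
            }
            return IRQ_HANDLED;
    }

    static void demo_irq_task(struct work_struct *work)
    {
            struct demo_priv *priv =
                    container_of(work, struct demo_priv, irq_work);
            unsigned int status;

            spin_lock_irq(&priv->lock);
            status = priv->irq_stat;
            priv->irq_stat = 0;
            spin_unlock_irq(&priv->lock);

            if (status & DEMO_IRQ_LINK)
                    pr_info("demo: deferred link event\n");
    }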
  - drivers/net/ethernet/broadcom/genet/bcmgenet.h, hunks at old lines 1, 214,
    224 and 619: copyright bumped to 2014-2017; UMAC_RBUF_OVFL_CNT and
    UMAC_RBUF_ERR_CNT become the _V1 offsets, with new RBUF_OVFL_CNT_V2 /
    _V3PLUS and RBUF_ERR_CNT_V2 / _V3PLUS registers; struct bcmgenet_priv
    drops irq1_stat and moves irq0_stat under a new "shared status"
    spinlock_t lock.
  - drivers/net/ethernet/cavium/liquidio/lio_main.c, hunks at old lines 152,
    734, 741, 772, 788, 796, 1885, 1933 and 3273: struct octnic_gather's
    sg_dma_ptr becomes a dma_addr_t; the per-queue gather lists are carved out
    of DMA-coherent blocks (new glists_virt_base/glists_dma_base allocated
    with lio_dma_alloc()) instead of kmalloc()ing each scatter list and
    dma_map_single()ing it; delete_glists()/setup_glists() are reworked
    accordingly and return -ENOMEM on failure; the per-packet
    dma_sync_single_for_cpu()/for_device() calls in free_netsgbuf(),
    free_netsgbuf_with_resp() and liquidio_xmit() are dropped (the allocation
    pattern is sketched just below).
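The liquidio changes above replace many small kmalloc() + dma_map_single()
scatter lists with one DMA-coherent block per queue from which entries are
carved (the octeon_network.h entry below shows lio_dma_free() is a
dma_free_coherent() wrapper, and lio_dma_alloc() is presumably its
dma_alloc_coherent() counterpart). A minimal sketch of that allocation
pattern, with invented demo_* names and no driver-specific layout:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    struct demo_pool {
            void *virt;          /* CPU address of the coherent block */
            dma_addr_t bus;      /* matching bus address */
            size_t entry_size;
            unsigned int nr;
    };

    static int demo_pool_init(struct device *dev, struct demo_pool *p,
                              size_t entry_size, unsigned int nr)
    {
            p->entry_size = entry_size;
            p->nr = nr;
            p->virt = dma_alloc_coherent(dev, entry_size * nr, &p->bus,
                                         GFP_KERNEL);
            return p->virt ? 0 : -ENOMEM;
    }

    /* Entry i lives at virt + i * entry_size; its device-visible address is
     * bus + i * entry_size. No per-entry mapping or syncing is needed. */
    static void *demo_pool_entry(struct demo_pool *p, unsigned int i,
                                 dma_addr_t *bus)
    {
            *bus = p->bus + i * p->entry_size;
            return p->virt + i * p->entry_size;
    }

    static void demo_pool_fini(struct device *dev, struct demo_pool *p)
    {
            dma_free_coherent(dev, p->entry_size * p->nr, p->virt, p->bus);
    }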
  - drivers/net/ethernet/cavium/liquidio/lio_vf_main.c, hunks at old lines
    108, 490, 497, 522, 536, 1324, 1374 and 2382: the same gather-list rework
    for the VF driver, including dropping the dma_map_single() /
    dma_unmap_single() of the scatter list in liquidio_xmit() and the free
    paths.
  - drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h, hunk at old line
    71: CN23XX_MAX_IQ_DESCRIPTORS and CN23XX_MAX_OQ_DESCRIPTORS drop from 2048
    to 512, and CN23XX_OQ_REFIL_THRESHOLD from 128 to 16.
  - drivers/net/ethernet/cavium/liquidio/octeon_droq.c, hunks at old lines
    155, 211 and 294: the DROQ info list is allocated and freed with the new
    lio_alloc_info_buffer()/lio_free_info_buffer() helpers instead of
    cnnic_numa_alloc_aligned_dma(), and the per-entry info_ptr unmapping in
    octeon_droq_destroy_ring_buffers() goes away.
  - drivers/net/ethernet/cavium/liquidio/octeon_droq.h, hunk at old line 325:
    struct octeon_droq's info_base_addr becomes a void * and info_list_dma a
    dma_addr_t.
  - drivers/net/ethernet/cavium/liquidio/octeon_main.h, hunk at old line 140:
    cnnic_numa_alloc_aligned_dma() and the cnnic_free_aligned_dma() macro are
    removed.
  - drivers/net/ethernet/cavium/liquidio/octeon_network.h, hunks at old lines
    62, 344 and 359: struct lio gains glists_virt_base, glists_dma_base and
    glist_entry_size; new lio_alloc_info_buffer()/lio_free_info_buffer()
    helpers wrap lio_dma_alloc()/lio_dma_free() (the latter being a
    dma_free_coherent() wrapper); lio_map_ring_info() now simply returns
    droq->info_list_dma + i * sizeof(struct octeon_droq_info), and
    lio_unmap_ring_info() is dropped.
  - drivers/net/ethernet/cavium/thunder/nic.h, hunk at old line 269: struct
    nicvf gains a void *iommu_domain member.
  - drivers/net/ethernet/cavium/thunder/nicvf_main.c, hunks at old lines 16,
    525, 576, 592 and 1643: nicvf_snd_pkt_handler() unmaps send-queue buffers
    (including the real TSO descriptors) with nicvf_unmap_sndq_buffers()
    before releasing descriptors; nicvf_rcv_pkt_handler() passes the
    secondary-qset nic (snic) to nicvf_get_rcv_skb(); nicvf_probe() records
    the device's IOMMU domain via iommu_get_domain_for_dev().
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>

@@ -18,6 +19,16 @@
#include "q_struct.h"
#include "nicvf_queues.h"

#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)

static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
/* Translation is installed only when IOMMU is present */
if (nic->iommu_domain)
return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
return dma_addr;
}

static void nicvf_get_page(struct nicvf *nic)
{
if (!nic->rb_pageref || !nic->rb_page)
@@ -87,7 +98,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
u32 buf_len, u64 **rbuf)
{
int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
int order = NICVF_PAGE_ORDER;

/* Check if request can be accomodated in previous allocated page */
if (nic->rb_page &&
@@ -97,22 +108,27 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
}

nicvf_get_page(nic);
nic->rb_page = NULL;

/* Allocate a new page */
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
this_cpu_inc(nic->pnicvf->drv_stats->
rcv_buffer_alloc_failures);
return -ENOMEM;
}
nic->rb_page_offset = 0;
this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
return -ENOMEM;
}

nic->rb_page_offset = 0;
ret:
*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
/* HW will ensure data coherency, CPU sync not required */
*rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
nic->rb_page_offset, buf_len,
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC));
if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
if (!nic->rb_page_offset)
__free_pages(nic->rb_page, order);
nic->rb_page = NULL;
return -ENOMEM;
}
nic->rb_page_offset += buf_len;

return 0;
@@ -158,16 +174,21 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
rbdr->dma_size = buf_size;
rbdr->enable = true;
rbdr->thresh = RBDR_THRESH;
rbdr->head = 0;
rbdr->tail = 0;

nic->rb_page = NULL;
for (idx = 0; idx < ring_len; idx++) {
err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
&rbuf);
if (err)
if (err) {
/* To free already allocated and mapped ones */
rbdr->tail = idx - 1;
return err;
}

desc = GET_RBDR_DESC(rbdr, idx);
desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
}

nicvf_get_page(nic);
@@ -179,7 +200,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
int head, tail;
u64 buf_addr;
u64 buf_addr, phys_addr;
struct rbdr_entry_t *desc;

if (!rbdr)
@@ -192,18 +213,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
head = rbdr->head;
tail = rbdr->tail;

/* Free SKBs */
/* Release page references */
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
put_page(virt_to_page(phys_to_virt(buf_addr)));
buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
phys_addr = nicvf_iova_to_phys(nic, buf_addr);
dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (phys_addr)
put_page(virt_to_page(phys_to_virt(phys_addr)));
head++;
head &= (rbdr->dmem.q_len - 1);
}
/* Free SKB of tail desc */
/* Release buffer of tail desc */
desc = GET_RBDR_DESC(rbdr, tail);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
put_page(virt_to_page(phys_to_virt(buf_addr)));
buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
phys_addr = nicvf_iova_to_phys(nic, buf_addr);
dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (phys_addr)
put_page(virt_to_page(phys_to_virt(phys_addr)));

/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@@ -250,7 +279,7 @@ static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
break;

desc = GET_RBDR_DESC(rbdr, tail);
desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
refill_rb_cnt--;
new_rb++;
}
@@ -361,9 +390,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
return 0;
}

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
int hdr_sqe, u8 subdesc_cnt)
{
u8 idx;
struct sq_gather_subdesc *gather;

/* Unmap DMA mapped skb data buffers */
for (idx = 0; idx < subdesc_cnt; idx++) {
hdr_sqe++;
hdr_sqe &= (sq->dmem.q_len - 1);
gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
/* HW will ensure data coherency, CPU sync not required */
dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
gather->size, DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
}
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
struct sk_buff *skb;
struct sq_hdr_subdesc *hdr;
struct sq_hdr_subdesc *tso_sqe;

if (!sq)
return;
@@ -379,8 +428,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
smp_rmb();
while (sq->head != sq->tail) {
skb = (struct sk_buff *)sq->skbuff[sq->head];
if (skb)
dev_kfree_skb_any(skb);
if (!skb)
goto next;
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
/* Check for dummy descriptor used for HW TSO offload on 88xx */
if (hdr->dont_send) {
/* Get actual TSO descriptors and unmap them */
tso_sqe =
(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
tso_sqe->subdesc_cnt);
} else {
nicvf_unmap_sndq_buffers(nic, sq, sq->head,
hdr->subdesc_cnt);
}
dev_kfree_skb_any(skb);
next:
sq->head++;
sq->head &= (sq->dmem.q_len - 1);
}
@@ -559,9 +622,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
nicvf_send_msg_to_pf(nic, &mbx);

if (!nic->sqs_mode && (qidx == 0)) {
/* Enable checking L3/L4 length and TCP/UDP checksums */
/* Enable checking L3/L4 length and TCP/UDP checksums
* Also allow IPv6 pkts with zero UDP checksum.
*/
nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
(BIT(24) | BIT(23) | BIT(21)));
(BIT(24) | BIT(23) | BIT(21) | BIT(20)));
nicvf_config_vlan_stripping(nic, nic->netdev->features);
}

@@ -882,6 +947,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
return qentry;
}

/* Rollback to previous tail pointer when descriptors not used */
static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
int qentry, int desc_cnt)
{
sq->tail = qentry;
atomic_add(desc_cnt, &sq->free_cnt);
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
@@ -1207,8 +1280,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
struct sk_buff *skb, u8 sq_num)
{
int i, size;
int subdesc_cnt, tso_sqe = 0;
int subdesc_cnt, hdr_sqe = 0;
int qentry;
u64 dma_addr;

subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
if (subdesc_cnt > atomic_read(&sq->free_cnt))
@@ -1223,12 +1297,21 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
tso_sqe = qentry;
hdr_sqe = qentry;

/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
/* HW will ensure data coherency, CPU sync not required */
dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
offset_in_page(skb->data), size,
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
return 0;
}

nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);

/* Check for scattered buffer */
if (!skb_is_nonlinear(skb))
@@ -1241,15 +1324,26 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,

qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_frag_size(frag);
nicvf_sq_add_gather_subdesc(sq, qentry, size,
virt_to_phys(
skb_frag_address(frag)));
dma_addr = dma_map_page_attrs(&nic->pdev->dev,
skb_frag_page(frag),
frag->page_offset, size,
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
/* Free entire chain of mapped buffers
* here 'i' = frags mapped + above mapped skb->data
*/
nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
return 0;
}
nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
}

doorbell:
if (nic->t88 && skb_shinfo(skb)->gso_size) {
qentry = nicvf_get_nxt_sqentry(sq, qentry);
nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
}

nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
@@ -1282,6 +1376,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int offset;
u16 *rb_lens = NULL;
u64 *rb_ptrs = NULL;
u64 phys_addr;

rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
/* Except 88xx pass1 on all other chips CQE_RX2_S is added to
@@ -1296,15 +1391,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
else
rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
__func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
payload_len = rb_lens[frag_num(frag)];
phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
if (!phys_addr) {
if (skb)
dev_kfree_skb_any(skb);
return NULL;
}

if (!frag) {
/* First fragment */
dma_unmap_page_attrs(&nic->pdev->dev,
*rb_ptrs - cqe_rx->align_pad,
RCV_FRAG_LEN, DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
skb = nicvf_rb_ptr_to_skb(nic,
*rb_ptrs - cqe_rx->align_pad,
phys_addr - cqe_rx->align_pad,
payload_len);
if (!skb)
return NULL;
@@ -1312,8 +1415,11 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
page = virt_to_page(phys_to_virt(*rb_ptrs));
offset = phys_to_virt(*rb_ptrs) - page_address(page);
dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
RCV_FRAG_LEN, DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
page = virt_to_page(phys_to_virt(phys_addr));
offset = phys_to_virt(phys_addr) - page_address(page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
offset, payload_len, RCV_FRAG_LEN);
}
@@ -87,7 +87,7 @@
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */
#define DMA_BUFFER_LEN 1536 /* In multiples of 128bytes */
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

@@ -301,6 +301,8 @@ struct queue_set {

#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
@@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
return 1;
}

static int max_bgx_per_node;
static void set_max_bgx_per_node(struct pci_dev *pdev)
{
u16 sdevid;

if (max_bgx_per_node)
return;

pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
switch (sdevid) {
case PCI_SUBSYS_DEVID_81XX_BGX:
max_bgx_per_node = MAX_BGX_PER_CN81XX;
break;
case PCI_SUBSYS_DEVID_83XX_BGX:
max_bgx_per_node = MAX_BGX_PER_CN83XX;
break;
case PCI_SUBSYS_DEVID_88XX_BGX:
default:
max_bgx_per_node = MAX_BGX_PER_CN88XX;
break;
}
}

static struct bgx *get_bgx(int node, int bgx_idx)
{
int idx = (node * max_bgx_per_node) + bgx_idx;

return bgx_vnic[idx];
}

/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
{
int i;
unsigned map = 0;

for (i = 0; i < MAX_BGX_PER_NODE; i++) {
if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
for (i = 0; i < max_bgx_per_node; i++) {
if (bgx_vnic[(node * max_bgx_per_node) + i])
map |= (1 << i);
}

@@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
{
struct bgx *bgx;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (bgx)
return bgx->lmac_count;

@@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
struct bgx *bgx;
struct lmac *lmac;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;

@@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);

if (bgx)
return bgx->lmac[lmacid].mac;
@@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);

if (!bgx)
return;
@@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;

@@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;

@@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;

@@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;

@@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;

@@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
struct lmac *lmac;
u64 cfg;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;

@@ -1011,12 +1041,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
dev_info(dev, "%s: 40G_KR4\n", (char *)str);
break;
case BGX_MODE_QSGMII:
if ((lmacid == 0) &&
(bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
return;
if ((lmacid == 2) &&
(bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
return;
dev_info(dev, "%s: QSGMII\n", (char *)str);
break;
case BGX_MODE_RGMII:
@@ -1334,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}

set_max_bgx_per_node(pdev);

pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
bgx->bgx_id = (pci_resource_start(pdev,
PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
bgx->max_lmac = MAX_LMAC_PER_BGX;
bgx_vnic[bgx->bgx_id] = bgx;
} else {
@@ -22,7 +22,6 @@
#define MAX_BGX_PER_CN88XX 2
#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
#define MAX_BGX_PER_CN83XX 4
#define MAX_BGX_PER_NODE 4
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
@@ -2589,8 +2589,6 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
static int emac_dt_phy_connect(struct emac_instance *dev,
struct device_node *phy_handle)
{
int res;

dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
GFP_KERNEL);
if (!dev->phy.def)
@@ -2617,7 +2615,7 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
struct device_node *phy_handle;
int res = 0;
int res = 1;

phy_handle = of_parse_phandle(np, "phy-handle", 0);

@@ -2714,13 +2712,24 @@ static int emac_init_phy(struct emac_instance *dev)
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
int res = emac_dt_phy_probe(dev);

mutex_unlock(&emac_phy_map_lock);
if (!res)
switch (res) {
case 1:
/* No phy-handle property configured.
* Continue with the existing phy probe
* and setup code.
*/
break;

case 0:
mutex_unlock(&emac_phy_map_lock);
goto init_phy;

dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
res);
return res;
default:
mutex_unlock(&emac_phy_map_lock);
dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
res);
return res;
}
}

if (dev->phy_address != 0xffffffff)
@@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
send_map_query(adapter);
for (i = 0; i < rxadd_subcrqs; i++) {
init_rx_pool(adapter, &adapter->rx_pool[i],
IBMVNIC_BUFFS_PER_POOL, i,
adapter->req_rx_add_entries_per_subcrq, i,
be64_to_cpu(size_array[i]), 1);
if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
dev_err(dev, "Couldn't alloc rx pool\n");
@@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
tx_pool->tx_buff =
kcalloc(adapter->max_tx_entries_per_subcrq,
kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
if (!tx_pool->tx_buff)
goto tx_pool_alloc_failed;

if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
adapter->max_tx_entries_per_subcrq *
adapter->req_tx_entries_per_subcrq *
adapter->req_mtu))
goto tx_ltb_alloc_failed;

tx_pool->free_map =
kcalloc(adapter->max_tx_entries_per_subcrq,
kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map)
goto tx_fm_alloc_failed;

for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
tx_pool->free_map[j] = j;

tx_pool->consumer_index = 0;
@@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
unsigned int tx_map_failed = 0;
@@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
int ret = 0;

tx_pool = &adapter->tx_pool[queue_num];
tx_scrq = adapter->tx_scrq[queue_num];
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->
@@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)

tx_pool->consumer_index =
(tx_pool->consumer_index + 1) %
adapter->max_tx_entries_per_subcrq;
adapter->req_tx_entries_per_subcrq;

tx_buff = &tx_pool->tx_buff[index];
tx_buff->skb = skb;
@@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)

if (tx_pool->consumer_index == 0)
tx_pool->consumer_index =
adapter->max_tx_entries_per_subcrq - 1;
adapter->req_tx_entries_per_subcrq - 1;
else
tx_pool->consumer_index--;

@@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_BUSY;
goto out;
}

atomic_inc(&tx_scrq->used);

if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
netdev_info(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num);
}

tx_packets++;
tx_bytes += skb->len;
txq->trans_start = jiffies;
@@ -1213,6 +1223,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
atomic_set(&scrq->used, 0);
scrq->rx_skb_top = NULL;
spin_lock_init(&scrq->lock);

@@ -1355,14 +1366,28 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
DMA_TO_DEVICE);
}

if (txbuff->last_frag)
if (txbuff->last_frag) {
atomic_dec(&scrq->used);

if (atomic_read(&scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
netif_subqueue_stopped(adapter->netdev,
txbuff->skb)) {
netif_wake_subqueue(adapter->netdev,
scrq->pool_index);
netdev_dbg(adapter->netdev,
"Started queue %d\n",
scrq->pool_index);
}

dev_kfree_skb_any(txbuff->skb);
}

adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
producer_index] = index;
adapter->tx_pool[pool].producer_index =
(adapter->tx_pool[pool].producer_index + 1) %
adapter->max_tx_entries_per_subcrq;
adapter->req_tx_entries_per_subcrq;
}
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
@@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
spinlock_t lock;
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
atomic_t used;
};

struct ibmvnic_long_term_buff {
@@ -14,6 +14,7 @@ config MLX5_CORE
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
imply PTP_1588_CLOCK
default n
---help---
@@ -302,6 +302,9 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_dcbx *dcbx = &priv->dcbx;

if (mode & DCB_CAP_DCBX_LLD_MANAGED)
return 1;

if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
return 0;
@@ -315,13 +318,10 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 1;
}

if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
if (!(mode & DCB_CAP_DCBX_HOST))
return 1;

if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
!(mode & DCB_CAP_DCBX_VER_CEE) ||
!(mode & DCB_CAP_DCBX_VER_IEEE) ||
!(mode & DCB_CAP_DCBX_HOST))
if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;

return 0;
@@ -204,9 +204,6 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct iphdr *iph;

/* We are only going to peek, no need to clone the SKB */
if (skb->protocol != htons(ETH_P_IP))
goto out;

if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;

@@ -249,7 +246,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);

lbtp->pt.type = htons(ETH_P_ALL);
lbtp->pt.type = htons(ETH_P_IP);
lbtp->pt.func = mlx5e_test_loopback_validate;
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
@@ -48,9 +48,14 @@
#include "eswitch.h"
#include "vxlan.h"

enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
};

struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
u8 flags;
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap */
struct mlx5_esw_flow_attr *attr;
@@ -177,7 +182,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5_fc_destroy(priv->mdev, counter);
}

if (esw && esw->mode == SRIOV_OFFLOADS) {
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
mlx5_eswitch_del_vlan_action(esw, flow->attr);
if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
@@ -598,6 +603,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}

static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
@@ -609,7 +615,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,

err = __parse_cls_flower(priv, spec, f, &min_inline);

if (!err && esw->mode == SRIOV_OFFLOADS &&
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
rep->vport != FDB_UPLINK_VPORT) {
if (min_inline > esw->offloads.inline_mode) {
netdev_warn(priv->netdev,
@@ -1132,23 +1138,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
int err = 0;
bool fdb_flow = false;
int err, attr_size = 0;
u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
u8 flow_flags = 0;

if (esw && esw->mode == SRIOV_OFFLOADS)
fdb_flow = true;

if (fdb_flow)
flow = kzalloc(sizeof(*flow) +
sizeof(struct mlx5_esw_flow_attr),
GFP_KERNEL);
else
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (esw && esw->mode == SRIOV_OFFLOADS) {
flow_flags = MLX5E_TC_FLOW_ESWITCH;
attr_size = sizeof(struct mlx5_esw_flow_attr);
}

flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
err = -ENOMEM;
@@ -1156,12 +1158,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
}

flow->cookie = f->cookie;
flow->flags = flow_flags;

err = parse_cls_flower(priv, spec, f);
err = parse_cls_flower(priv, flow, spec, f);
if (err < 0)
goto err_free;

if (fdb_flow) {
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
err = parse_tc_fdb_actions(priv, f->exts, flow);
if (err < 0)
@@ -1136,7 +1136,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct list_head *prev = ft->node.children.prev;
struct list_head *prev = &ft->node.children;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
@@ -1352,6 +1352,7 @@ static int init_one(struct pci_dev *pdev,
if (err)
goto clean_load;

pci_save_state(pdev);
return 0;

clean_load:
@@ -1407,9 +1408,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,

mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
/* In case of kernel call save the pci state and drain the health wq */
/* In case of kernel call drain the health wq */
if (state) {
pci_save_state(pdev);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
}
@@ -1461,6 +1461,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)

pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);

if (wait_vital(pdev)) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
@@ -769,7 +769,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
#define MLXSW_REG_SPVM_ID 0x200F
#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)

@@ -1702,7 +1702,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
#define MLXSW_REG_SPVMLR_ID 0x2020
#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
MLXSW_REG_SPVMLR_REC_LEN * \
MLXSW_REG_SPVMLR_REC_MAX_COUNT)
@@ -303,11 +303,11 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
ingress,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (WARN_ON(IS_ERR(ruleset)))
if (IS_ERR(ruleset))
return;

rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
if (!WARN_ON(!rule)) {
if (rule) {
mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
}
@@ -422,8 +422,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
u32 align = elems_per_page * DQ_RANGE_ALIGN;

p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
p_conn->cid_count = roundup(p_conn->cid_count, align);
}
}

@@ -2389,9 +2389,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
* size/capacity fields are of a u32 type.
*/
if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
chain_size > 0x10000) ||
(cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
chain_size > 0x100000000ULL)) {
chain_size > ((u32)U16_MAX + 1)) ||
(cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
DP_NOTICE(cdev,
"The actual chain size (0x%llx) is larger than the maximal possible value\n",
chain_size);
@@ -190,6 +190,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
p_init->ooo_enable = p_params->ooo_enable;
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -786,6 +789,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}

void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn)
{
qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct tcp_upload_params),
p_conn->tcp_upload_params_virt_addr,
p_conn->tcp_upload_params_phys_addr);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct scsi_terminate_extra_params),
p_conn->queue_cnts_virt_addr,
p_conn->queue_cnts_phys_addr);
kfree(p_conn);
}

struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_info *p_iscsi_info;
@@ -807,6 +827,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info)
{
struct qed_iscsi_conn *p_conn = NULL;

while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
struct qed_iscsi_conn, list_entry);
if (p_conn) {
list_del(&p_conn->list_entry);
qed_iscsi_free_connection(p_hwfn, p_conn);
}
}

kfree(p_iscsi_info);
}

@@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
/* If need to reuse or there's no replacement buffer, repost this */
if (rc)
goto out_post;
dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
cdev->ll2->rx_size, DMA_FROM_DEVICE);

skb = build_skb(buffer->data, 0);
if (!skb) {
@@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
unsigned long lock_flags,
unsigned long *p_lock_flags,
bool b_last_cqe)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

spin_unlock_irqrestore(&p_rx->lock, lock_flags);
spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
spin_lock_irqsave(&p_rx->lock, lock_flags);
spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

return 0;
}
@@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
break;
case CORE_RX_CQE_TYPE_REGULAR:
rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
cqe, flags, b_last_cqe);
cqe, &flags,
b_last_cqe);
break;
default:
rc = -EIO;
@@ -968,7 +971,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
struct qed_ll2_conn ll2_info;
struct qed_ll2_conn ll2_info = { 0 };
int rc;

ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
@@ -159,6 +159,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info->ooo_history.p_cqes)
goto no_history_mem;

p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;

return p_ooo_info;

no_history_mem:
@@ -1535,32 +1535,33 @@ static int smc_close(struct net_device *dev)
* Ethtool support
*/
static int
smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
smc_ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;

cmd->maxtxpkt = 1;
cmd->maxrxpkt = 1;

if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
ret = mii_ethtool_gset(&lp->mii, cmd);
ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
cmd->supported = SUPPORTED_10baseT_Half |
u32 supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP | SUPPORTED_AUI;

if (lp->ctl_rspeed == 10)
ethtool_cmd_speed_set(cmd, SPEED_10);
cmd->base.speed = SPEED_10;
else if (lp->ctl_rspeed == 100)
ethtool_cmd_speed_set(cmd, SPEED_100);
cmd->base.speed = SPEED_100;

cmd->autoneg = AUTONEG_DISABLE;
cmd->transceiver = XCVR_INTERNAL;
cmd->port = 0;
cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = 0;
cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ?
DUPLEX_FULL : DUPLEX_HALF;

ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported, supported);

ret = 0;
}
@@ -1569,24 +1570,26 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
}

static int
smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
smc_ethtool_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;

if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
ret = mii_ethtool_sset(&lp->mii, cmd);
ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
if (cmd->autoneg != AUTONEG_DISABLE ||
cmd->speed != SPEED_10 ||
(cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
(cmd->port != PORT_TP && cmd->port != PORT_AUI))
if (cmd->base.autoneg != AUTONEG_DISABLE ||
cmd->base.speed != SPEED_10 ||
(cmd->base.duplex != DUPLEX_HALF &&
cmd->base.duplex != DUPLEX_FULL) ||
(cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
return -EINVAL;

// lp->port = cmd->port;
lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
// lp->port = cmd->base.port;
lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;

// if (netif_running(dev))
// smc_set_port(dev);
@@ -1744,8 +1747,6 @@ static int smc_ethtool_seteeprom(struct net_device *dev,


static const struct ethtool_ops smc_ethtool_ops = {
.get_settings = smc_ethtool_getsettings,
.set_settings = smc_ethtool_setsettings,
.get_drvinfo = smc_ethtool_getdrvinfo,

.get_msglevel = smc_ethtool_getmsglevel,
@@ -1755,6 +1756,8 @@ static const struct ethtool_ops smc_ethtool_ops = {
.get_eeprom_len = smc_ethtool_geteeprom_len,
.get_eeprom = smc_ethtool_geteeprom,
.set_eeprom = smc_ethtool_seteeprom,
.get_link_ksettings = smc_ethtool_get_link_ksettings,
.set_link_ksettings = smc_ethtool_set_link_ksettings,
};

static const struct net_device_ops smc_netdev_ops = {
@@ -700,6 +700,8 @@ struct net_device_context {

u32 tx_checksum_mask;

u32 tx_send_table[VRSS_SEND_TAB_SIZE];

/* Ethtool settings */
u8 duplex;
u32 speed;
@@ -757,7 +759,6 @@ struct netvsc_device {

struct nvsp_message revoke_packet;

u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */
@@ -1136,15 +1136,11 @@ static void netvsc_receive(struct net_device *ndev,
static void netvsc_send_table(struct hv_device *hdev,
struct nvsp_message *nvmsg)
{
struct netvsc_device *nvscdev;
struct net_device *ndev = hv_get_drvdata(hdev);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
int i;
u32 count, *tab;

nvscdev = get_outbound_net_device(hdev);
if (!nvscdev)
return;

count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1155,7 +1151,7 @@ static void netvsc_send_table(struct hv_device *hdev,
nvmsg->msg.v5_msg.send_table.offset);

for (i = 0; i < count; i++)
nvscdev->send_table[i] = tab[i];
net_device_ctx->tx_send_table[i] = tab[i];
}

static void netvsc_send_vf(struct net_device_context *net_device_ctx,
@@ -206,17 +206,15 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
unsigned int num_tx_queues = ndev->real_num_tx_queues;
struct sock *sk = skb->sk;
int q_idx = sk_tx_queue_get(sk);

if (q_idx < 0 || skb->ooo_okay ||
q_idx >= ndev->real_num_tx_queues) {
if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
int new_idx;

new_idx = nvsc_dev->send_table[hash]
% nvsc_dev->num_chn;
new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;

if (q_idx != new_idx && sk &&
sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
@@ -225,9 +223,6 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
q_idx = new_idx;
}

if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
q_idx = 0;

return q_idx;
}

@@ -1883,17 +1883,6 @@ static int m88e1510_probe(struct phy_device *phydev)
return m88e1510_hwmon_probe(phydev);
}

static void marvell_remove(struct phy_device *phydev)
{
#ifdef CONFIG_HWMON

struct marvell_priv *priv = phydev->priv;

if (priv && priv->hwmon_dev)
hwmon_device_unregister(priv->hwmon_dev);
#endif
}

static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,7 +1963,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
.remove = &marvell_remove,
.config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
@@ -2087,7 +2075,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1510_probe,
.remove = &marvell_remove,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2109,7 +2096,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = m88e1510_probe,
.remove = &marvell_remove,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2127,7 +2113,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
.remove = &marvell_remove,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,
@@ -1864,7 +1864,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
.soft_reset = genphy_soft_reset,
.soft_reset = genphy_no_soft_reset,
.config_init = genphy_config_init,
.features = PHY_GBIT_FEATURES | SUPPORTED_MII |
SUPPORTED_AUI | SUPPORTED_FIBRE |
@@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
if (err)
return err;

ks->regs_attr.size = ks->chip->regs_size;
memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
ks->regs_attr.size = ks->chip->regs_size;

err = ks8995_reset(ks);
if (err)
return err;

sysfs_attr_init(&ks->regs_attr.attr);
err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
if (err) {
dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
@@ -2072,6 +2072,7 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;

dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
@@ -822,7 +822,18 @@ static void tun_net_uninit(struct net_device *dev)
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
int i;

netif_tx_start_all_queues(dev);

for (i = 0; i < tun->numqueues; i++) {
struct tun_file *tfile;

tfile = rtnl_dereference(tun->tfiles[i]);
tfile->socket.sk->sk_write_space(tfile->socket.sk);
}

return 0;
}

@@ -1103,9 +1114,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;

if (sock_writeable(sk) ||
(!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
sock_writeable(sk)))
if (tun->dev->flags & IFF_UP &&
(sock_writeable(sk) ||
(!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
sock_writeable(sk))))
mask |= POLLOUT | POLLWRNORM;

if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -2570,7 +2582,6 @@ static int __init tun_init(void)
int ret = 0;

pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
pr_info("%s\n", DRV_COPYRIGHT);

ret = rtnl_link_register(&tun_link_ops);
if (ret) {
@@ -340,6 +340,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)

static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
int len = skb->len;
netdev_tx_t ret = is_ip_tx_frame(skb, dev);

if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -347,7 +348,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)

u64_stats_update_begin(&dstats->syncp);
dstats->tx_pkts++;
dstats->tx_bytes += skb->len;
dstats->tx_bytes += len;
u64_stats_update_end(&dstats->syncp);
} else {
this_cpu_inc(dev->dstats->tx_drps);
@@ -2976,6 +2976,44 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return 0;
}

static int __vxlan_dev_create(struct net *net, struct net_device *dev,
struct vxlan_config *conf)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;

err = vxlan_dev_configure(net, dev, conf, false);
if (err)
return err;

dev->ethtool_ops = &vxlan_ethtool_ops;

/* create an fdb entry for a valid default destination */
if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&vxlan->default_dst.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_EXCL | NLM_F_CREATE,
vxlan->cfg.dst_port,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_ifindex,
NTF_SELF);
if (err)
return err;
}

err = register_netdevice(dev);
if (err) {
vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
return err;
}

list_add(&vxlan->next, &vn->vxlan_list);
return 0;
}

static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink)
@@ -3172,8 +3210,6 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_config conf;
int err;

@@ -3181,36 +3217,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (err)
return err;

err = vxlan_dev_configure(src_net, dev, &conf, false);
if (err)
return err;

dev->ethtool_ops = &vxlan_ethtool_ops;

/* create an fdb entry for a valid default destination */
if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&vxlan->default_dst.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_EXCL | NLM_F_CREATE,
vxlan->cfg.dst_port,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_ifindex,
NTF_SELF);
if (err)
return err;
}

err = register_netdevice(dev);
if (err) {
vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
return err;
}

list_add(&vxlan->next, &vn->vxlan_list);

return 0;
return __vxlan_dev_create(src_net, dev, &conf);
}

static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3440,7 +3447,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
if (IS_ERR(dev))
return dev;

err = vxlan_dev_configure(net, dev, conf, false);
err = __vxlan_dev_create(net, dev, conf);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

iowrite16be(bd_status, &bd->status);
iowrite16be(skb->len, &bd->length);
iowrite16be(bd_status, &bd->status);

/* Move to next BD in the ring */
if (!(bd_status & T_W_S))
@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
struct sk_buff *skb;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct qe_bd *bd;
u32 bd_status;
u16 bd_status;
u16 length, howmany = 0;
u8 *bdbuffer;
int i;
@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
struct i2400mu *i2400mu;
struct usb_device *usb_dev = interface_to_usbdev(iface);

if (iface->cur_altsetting->desc.bNumEndpoints < 4)
return -ENODEV;

if (usb_dev->speed != USB_SPEED_HIGH)
dev_err(dev, "device not connected as high speed\n");

@@ -165,13 +165,17 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
unsigned int num_queues;
u16 index;
struct xenvif_rx_cb *cb;

BUG_ON(skb->dev != dev);

/* Drop the packet if queues are not set up */
/* Drop the packet if queues are not set up.
* This handler should be called inside an RCU read section
* so we don't need to enter it here explicitly.
*/
num_queues = READ_ONCE(vif->num_queues);
if (num_queues < 1)
goto drop;

@@ -222,18 +226,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned int num_queues;
u64 rx_bytes = 0;
u64 rx_packets = 0;
u64 tx_bytes = 0;
u64 tx_packets = 0;
unsigned int index;

spin_lock(&vif->lock);
if (vif->queues == NULL)
goto out;
rcu_read_lock();
num_queues = READ_ONCE(vif->num_queues);

/* Aggregate tx and rx stats from each queue */
for (index = 0; index < vif->num_queues; ++index) {
for (index = 0; index < num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@@ -241,8 +245,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
tx_packets += queue->stats.tx_packets;
}

out:
spin_unlock(&vif->lock);
rcu_read_unlock();

vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
@@ -378,10 +381,13 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int num_queues = vif->num_queues;
unsigned int num_queues;
int i;
unsigned int queue_index;

rcu_read_lock();
num_queues = READ_ONCE(vif->num_queues);

for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
unsigned long accum = 0;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -390,6 +396,8 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
}
data[i] = accum;
}

rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@ -214,7 +214,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
|
|||
netdev_err(vif->dev, "fatal error; disabling device\n");
|
||||
vif->disabled = true;
|
||||
/* Disable the vif from queue 0's kthread */
|
||||
if (vif->queues)
|
||||
if (vif->num_queues)
|
||||
xenvif_kick_thread(&vif->queues[0]);
|
||||
}
|
||||
|
||||
|
|
|
@@ -495,26 +495,26 @@ static void backend_disconnect(struct backend_info *be)
struct xenvif *vif = be->vif;
if (vif) {
unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
struct xenvif_queue *queues;
xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(vif);
for (queue_index = 0;
queue_index < vif->num_queues;
++queue_index)
/* At this point some of the handlers may still be active
* so we need to have additional synchronization here.
*/
vif->num_queues = 0;
synchronize_net();
for (queue_index = 0; queue_index < num_queues; ++queue_index)
xenvif_deinit_queue(&vif->queues[queue_index]);
spin_lock(&vif->lock);
queues = vif->queues;
vif->num_queues = 0;
vfree(vif->queues);
vif->queues = NULL;
spin_unlock(&vif->lock);
vfree(queues);
xenvif_disconnect_ctrl(vif);
}
@@ -532,7 +532,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
newsock->ops = sock->ops;
rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
if (rc == -EAGAIN) {
/* Nothing ready, so wait for activity */
init_waitqueue_entry(&wait, current);

@@ -540,7 +540,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
set_current_state(TASK_INTERRUPTIBLE);
schedule();
remove_wait_queue(sk_sleep(sock->sk), &wait);
rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
}
if (rc)

@@ -743,7 +743,7 @@ static int tcp_accept_from_sock(struct connection *con)
newsock->type = con->sock->type;
newsock->ops = con->sock->ops;
result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK, true);
if (result < 0)
goto accept_err;

@@ -1863,7 +1863,7 @@ static int o2net_accept_one(struct socket *sock, int *more)
new_sock->type = sock->type;
new_sock->ops = sock->ops;
ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false);
if (ret < 0)
goto out;
@@ -73,7 +73,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
int af_alg_accept(struct sock *sk, struct socket *newsock);
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);

@@ -163,6 +163,7 @@ struct dccp_request_sock {
__u64 dreq_isr;
__u64 dreq_gsr;
__be32 dreq_service;
spinlock_t dreq_lock;
struct list_head dreq_featneg;
__u32 dreq_timestamp_echo;
__u32 dreq_timestamp_time;
@@ -409,6 +409,7 @@ struct bpf_prog {
u16 pages; /* Number of allocated pages */
kmemcheck_bitfield_begin(meta);
u16 jited:1, /* Is our filter JIT'ed? */
locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1, /* Do we need dst entry? */

@@ -554,22 +555,29 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
set_memory_ro((unsigned long)fp, fp->pages);
fp->locked = 1;
WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
set_memory_rw((unsigned long)fp, fp->pages);
if (fp->locked) {
WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
/* In case set_memory_rw() fails, we want to be the first
* to crash here instead of some random place later on.
*/
fp->locked = 0;
}
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
set_memory_ro((unsigned long)hdr, hdr->pages);
WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
set_memory_rw((unsigned long)hdr, hdr->pages);
WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
@@ -29,6 +29,11 @@ struct hlist_nulls_node {
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))

#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_nulls_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
!is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \
})
/**
* ptr_is_a_nulls - Test if a ptr is a nulls
* @ptr: ptr to be tested

@@ -146,7 +146,7 @@ struct proto_ops {
int (*socketpair)(struct socket *sock1,
struct socket *sock2);
int (*accept) (struct socket *sock,
struct socket *newsock, int flags);
struct socket *newsock, int flags, bool kern);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int *sockaddr_len, int peer);
@@ -837,6 +837,10 @@ int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_soft_reset(struct phy_device *phydev);
static inline int genphy_no_soft_reset(struct phy_device *phydev)
{
return 0;
}
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver, struct module *owner);

@@ -156,5 +156,19 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))

/**
* hlist_nulls_for_each_entry_safe -
* iterate over list of given type safe against removal of list entry
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_nulls_node within the struct.
*/
#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \
for (({barrier();}), \
pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
(!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \
pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
#endif
#endif
@@ -20,7 +20,8 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags, int is_sendmsg);
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
int inet_accept(struct socket *sock, struct socket *newsock, int flags);
int inet_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);

@@ -258,7 +258,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
return (unsigned long)min_t(u64, when, max_when);
}

struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);

int inet_csk_get_port(struct sock *sk, unsigned short snum);

@@ -476,7 +476,8 @@ struct sctp_pf {
int (*send_verify) (struct sctp_sock *, union sctp_addr *);
int (*supported_addrs)(const struct sctp_sock *, __be16 *);
struct sock *(*create_accept_sk) (struct sock *sk,
struct sctp_association *asoc);
struct sctp_association *asoc,
bool kern);
int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
@@ -236,6 +236,7 @@ struct sock_common {
* @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
* @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
* @sk_lock: synchronizer
* @sk_kern_sock: True if sock is using kernel lock classes
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux

@@ -430,7 +431,8 @@ struct sock {
#endif

kmemcheck_bitfield_begin(flags);
unsigned int sk_padding : 2,
unsigned int sk_padding : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
sk_userlocks : 4,

@@ -1015,7 +1017,8 @@ struct proto {
int addr_len);
int (*disconnect)(struct sock *sk, int flags);

struct sock * (*accept)(struct sock *sk, int flags, int *err);
struct sock * (*accept)(struct sock *sk, int flags, int *err,
bool kern);

int (*ioctl)(struct sock *sk, int cmd,
unsigned long arg);

@@ -1573,7 +1576,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
unsigned int sock_no_poll(struct file *, struct socket *,
struct poll_table_struct *);

@@ -64,7 +64,7 @@ struct packet_diag_mclist {
__u32 pdmc_count;
__u16 pdmc_type;
__u16 pdmc_alen;
__u8 pdmc_addr[MAX_ADDR_LEN];
__u8 pdmc_addr[32]; /* MAX_ADDR_LEN */
};

struct packet_diag_ring {
@@ -13,11 +13,12 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"

struct bucket {
struct hlist_head head;
struct hlist_nulls_head head;
raw_spinlock_t lock;
};

@@ -44,9 +45,14 @@ enum extra_elem_state {
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
union {
struct hlist_node hash_node;
struct bpf_htab *htab;
struct pcpu_freelist_node fnode;
struct hlist_nulls_node hash_node;
struct {
void *padding;
union {
struct bpf_htab *htab;
struct pcpu_freelist_node fnode;
};
};
};
union {
struct rcu_head rcu;

@@ -162,7 +168,8 @@ static int prealloc_init(struct bpf_htab *htab)
offsetof(struct htab_elem, lru_node),
htab->elem_size, htab->map.max_entries);
else
pcpu_freelist_populate(&htab->freelist, htab->elems,
pcpu_freelist_populate(&htab->freelist,
htab->elems + offsetof(struct htab_elem, fnode),
htab->elem_size, htab->map.max_entries);

return 0;

@@ -217,6 +224,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
int err, i;
u64 cost;

BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
offsetof(struct htab_elem, hash_node.pprev));
BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
offsetof(struct htab_elem, hash_node.pprev));

if (lru && !capable(CAP_SYS_ADMIN))
/* LRU implementation is much complicated than other
* maps. Hence, limit to CAP_SYS_ADMIN for now.

@@ -326,7 +338,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
goto free_htab;

for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_HEAD(&htab->buckets[i].head);
INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
raw_spin_lock_init(&htab->buckets[i].lock);
}

@@ -366,28 +378,52 @@ static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
return &__select_bucket(htab, hash)->head;
}

static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
void *key, u32 key_size)
{
struct hlist_nulls_node *n;
struct htab_elem *l;

hlist_for_each_entry_rcu(l, head, hash_node)
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l->hash == hash && !memcmp(&l->key, key, key_size))
return l;

return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
* the unlikely event when elements moved from one bucket into another
* while link list is being walked
*/
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
u32 hash, void *key,
u32 key_size, u32 n_buckets)
{
struct hlist_nulls_node *n;
struct htab_elem *l;

again:
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l->hash == hash && !memcmp(&l->key, key, key_size))
return l;

if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
goto again;

return NULL;
}

/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct hlist_head *head;
struct hlist_nulls_head *head;
struct htab_elem *l;
u32 hash, key_size;

@@ -400,7 +436,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
head = select_bucket(htab, hash);

l = lookup_elem_raw(head, hash, key, key_size);
l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

return l;
}

@@ -433,8 +469,9 @@ static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
struct bpf_htab *htab = (struct bpf_htab *)arg;
struct htab_elem *l, *tgt_l;
struct hlist_head *head;
struct htab_elem *l = NULL, *tgt_l;
struct hlist_nulls_head *head;
struct hlist_nulls_node *n;
unsigned long flags;
struct bucket *b;

@@ -444,9 +481,9 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
raw_spin_lock_irqsave(&b->lock, flags);

hlist_for_each_entry_rcu(l, head, hash_node)
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l == tgt_l) {
hlist_del_rcu(&l->hash_node);
hlist_nulls_del_rcu(&l->hash_node);
break;
}

@@ -459,7 +496,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct hlist_head *head;
struct hlist_nulls_head *head;
struct htab_elem *l, *next_l;
u32 hash, key_size;
int i;

@@ -473,7 +510,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
head = select_bucket(htab, hash);

/* lookup the key */
l = lookup_elem_raw(head, hash, key, key_size);
l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

if (!l) {
i = 0;

@@ -481,7 +518,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
}

/* key was found, get next key in the same bucket */
next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
struct htab_elem, hash_node);

if (next_l) {

@@ -500,7 +537,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
head = select_bucket(htab, i);

/* pick first element in the bucket */
next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
struct htab_elem, hash_node);
if (next_l) {
/* if it's not empty, just return it */

@@ -582,9 +619,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
int err = 0;

if (prealloc) {
l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
if (!l_new)
struct pcpu_freelist_node *l;

l = pcpu_freelist_pop(&htab->freelist);
if (!l)
err = -E2BIG;
else
l_new = container_of(l, struct htab_elem, fnode);
} else {
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
atomic_dec(&htab->count);

@@ -661,7 +702,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
struct hlist_head *head;
struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;

@@ -700,9 +741,9 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
/* add new element to the head of the list, so that
* concurrent search will find it before old elem
*/
hlist_add_head_rcu(&l_new->hash_node, head);
hlist_nulls_add_head_rcu(&l_new->hash_node, head);
if (l_old) {
hlist_del_rcu(&l_old->hash_node);
hlist_nulls_del_rcu(&l_old->hash_node);
free_htab_elem(htab, l_old);
}
ret = 0;

@@ -716,7 +757,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new, *l_old = NULL;
struct hlist_head *head;
struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;

@@ -757,10 +798,10 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
/* add new element to the head of the list, so that
* concurrent search will find it before old elem
*/
hlist_add_head_rcu(&l_new->hash_node, head);
hlist_nulls_add_head_rcu(&l_new->hash_node, head);
if (l_old) {
bpf_lru_node_set_ref(&l_new->lru_node);
hlist_del_rcu(&l_old->hash_node);
hlist_nulls_del_rcu(&l_old->hash_node);
}
ret = 0;

@@ -781,7 +822,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
struct hlist_head *head;
struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;

@@ -820,7 +861,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
ret = PTR_ERR(l_new);
goto err;
}
hlist_add_head_rcu(&l_new->hash_node, head);
hlist_nulls_add_head_rcu(&l_new->hash_node, head);
}
ret = 0;
err:

@@ -834,7 +875,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
struct hlist_head *head;
struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;

@@ -882,7 +923,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
} else {
pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
value, onallcpus);
hlist_add_head_rcu(&l_new->hash_node, head);
hlist_nulls_add_head_rcu(&l_new->hash_node, head);
l_new = NULL;
}
ret = 0;

@@ -910,7 +951,7 @@ static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct hlist_head *head;
struct hlist_nulls_head *head;
struct bucket *b;
struct htab_elem *l;
unsigned long flags;

@@ -930,7 +971,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
l = lookup_elem_raw(head, hash, key, key_size);

if (l) {
hlist_del_rcu(&l->hash_node);
hlist_nulls_del_rcu(&l->hash_node);
free_htab_elem(htab, l);
ret = 0;
}

@@ -942,7 +983,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct hlist_head *head;
struct hlist_nulls_head *head;
struct bucket *b;
struct htab_elem *l;
unsigned long flags;

@@ -962,7 +1003,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
l = lookup_elem_raw(head, hash, key, key_size);

if (l) {
hlist_del_rcu(&l->hash_node);
hlist_nulls_del_rcu(&l->hash_node);
ret = 0;
}

@@ -977,12 +1018,12 @@ static void delete_all_elements(struct bpf_htab *htab)
int i;

for (i = 0; i < htab->n_buckets; i++) {
struct hlist_head *head = select_bucket(htab, i);
struct hlist_node *n;
struct hlist_nulls_head *head = select_bucket(htab, i);
struct hlist_nulls_node *n;
struct htab_elem *l;

hlist_for_each_entry_safe(l, n, head, hash_node) {
hlist_del_rcu(&l->hash_node);
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
hlist_nulls_del_rcu(&l->hash_node);
if (l->state != HTAB_EXTRA_ELEM_USED)
htab_elem_free(htab, l);
}
@@ -500,9 +500,15 @@ static void trie_free(struct bpf_map *map)
raw_spin_unlock(&trie->lock);
}

static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
return -ENOTSUPP;
}

static const struct bpf_map_ops trie_ops = {
.map_alloc = trie_alloc,
.map_free = trie_free,
.map_get_next_key = trie_get_next_key,
.map_lookup_elem = trie_lookup_elem,
.map_update_elem = trie_update_elem,
.map_delete_elem = trie_delete_elem,
@@ -318,7 +318,8 @@ static int svc_listen(struct socket *sock, int backlog)
return error;
}

static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;

@@ -329,7 +330,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
lock_sock(sk);

error = svc_create(sock_net(sk), newsock, 0, 0);
error = svc_create(sock_net(sk), newsock, 0, kern);
if (error)
goto out;

@@ -1320,7 +1320,8 @@ static int __must_check ax25_connect(struct socket *sock,
return err;
}

static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sk_buff *skb;
struct sock *newsk;

@@ -301,7 +301,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
}

static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
int flags)
int flags, bool kern)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;

@@ -471,7 +471,8 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
return err;
}

static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;

@@ -627,7 +627,7 @@ static int sco_sock_listen(struct socket *sock, int backlog)
}

static int sco_sock_accept(struct socket *sock, struct socket *newsock,
int flags)
int flags, bool kern)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *ch;
@@ -30,6 +30,7 @@ EXPORT_SYMBOL(br_should_route_hook);
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
br_drop_fake_rtable(skb);
return netif_receive_skb(skb);
}

@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv,
}

/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
* dst_entry, so detach the fake one. On the way up, the
* packet would pass through PRE_ROUTING again (which already
* took place when the packet entered the bridge), but we
* register an IPv4 PRE_ROUTING 'sabotage' hook that will
* prevent this from happening. */
static unsigned int br_nf_local_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
br_drop_fake_rtable(skb);
return NF_ACCEPT;
}

/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{

@@ -907,12 +892,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_local_in,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_forward_ip,
.pf = NFPROTO_BRIDGE,
@@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev)
{
rtnl_lock();
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

@@ -953,7 +953,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
while (--i >= new_num) {
struct kobject *kobj = &dev->_rx[i].kobj;

if (!list_empty(&dev_net(dev)->exit_list))
if (!atomic_read(&dev_net(dev)->count))
kobj->uevent_suppress = 1;
if (dev->sysfs_rx_queue_group)
sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);

@@ -1371,7 +1371,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
while (--i >= new_num) {
struct netdev_queue *queue = dev->_tx + i;

if (!list_empty(&dev_net(dev)->exit_list))
if (!atomic_read(&dev_net(dev)->count))
queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
sysfs_remove_group(&queue->kobj, &dql_group);

@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
{
struct device *dev = &(ndev->dev);

if (!list_empty(&dev_net(ndev)->exit_list))
if (!atomic_read(&dev_net(ndev)->count))
dev_set_uevent_suppress(dev, 1);

kobject_get(&dev->kobj);
@@ -3828,13 +3828,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
if (!skb_may_tx_timestamp(sk, false))
return;

/* take a reference to prevent skb_orphan() from freeing the socket */
sock_hold(sk);

*skb_hwtstamps(skb) = *hwtstamps;
__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);

sock_put(sk);
/* Take a reference to prevent skb_orphan() from freeing the socket,
* but only if the socket refcount is not zero.
*/
if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
*skb_hwtstamps(skb) = *hwtstamps;
__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
sock_put(sk);
}
}
EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);

@@ -3893,7 +3894,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
struct sock *sk = skb->sk;
struct sock_exterr_skb *serr;
int err;
int err = 1;

skb->wifi_acked_valid = 1;
skb->wifi_acked = acked;

@@ -3903,14 +3904,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;

/* take a reference to prevent skb_orphan() from freeing the socket */
sock_hold(sk);

err = sock_queue_err_skb(sk, skb);
/* Take a reference to prevent skb_orphan() from freeing the socket,
* but only if the socket refcount is not zero.
*/
if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
err = sock_queue_err_skb(sk, skb);
sock_put(sk);
}
if (err)
kfree_skb(skb);

sock_put(sk);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
net/core/sock.c

@@ -197,66 +197,55 @@ EXPORT_SYMBOL(sk_net_capable);

/*
* Each address family might have different locking rules, so we have
* one slock key per address family:
* one slock key per address family and separate keys for internal and
* userspace sockets.
*/
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
* Make lock validator output more readable. (we pre-construct these
* strings build-time, so that runtime initialization of socket
* locks is fast):
*/

#define _sock_locks(x) \
x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
x "27" , x "28" , x "AF_CAN" , \
x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
"sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
"sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
"sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
"sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
"sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
"sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
"sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
"sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC" , "sk_lock-AF_MAX"
_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
"slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
"slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
"slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
"slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
"slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
"slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
"slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
"slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
"slock-27" , "slock-28" , "slock-AF_CAN" ,
"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
"slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
"slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
"slock-AF_QIPCRTR", "slock-AF_SMC" , "slock-AF_MAX"
_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
"clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
"clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
"clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
"clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
"clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
"clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
"clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
"clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
"clock-27" , "clock-28" , "clock-AF_CAN" ,
"clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
"clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
"clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX"
_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
_sock_locks("k-clock-")
};

/*

@@ -264,6 +253,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
* so split the lock classes by using a per-AF key:
*/
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
* determination of these values, since that is non-constant across

@@ -1293,7 +1283,16 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
*/
static inline void sock_lock_init(struct sock *sk)
{
sock_lock_init_class_and_name(sk,
if (sk->sk_kern_sock)
sock_lock_init_class_and_name(
sk,
af_family_kern_slock_key_strings[sk->sk_family],
af_family_kern_slock_keys + sk->sk_family,
af_family_kern_key_strings[sk->sk_family],
af_family_kern_keys + sk->sk_family);
else
sock_lock_init_class_and_name(
sk,
af_family_slock_key_strings[sk->sk_family],
af_family_slock_keys + sk->sk_family,
af_family_key_strings[sk->sk_family],

@@ -1399,6 +1398,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
* why we need sk_prot_creator -acme
*/
sk->sk_prot = sk->sk_prot_creator = prot;
sk->sk_kern_sock = kern;
sock_lock_init(sk);
sk->sk_net_refcnt = kern ? 0 : 1;
if (likely(sk->sk_net_refcnt))

@@ -2277,7 +2277,8 @@ int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
return -EOPNOTSUPP;
}

@@ -2481,7 +2482,14 @@ void sock_init_data(struct socket *sock, struct sock *sk)
}

rwlock_init(&sk->sk_callback_lock);
lockdep_set_class_and_name(&sk->sk_callback_lock,
if (sk->sk_kern_sock)
lockdep_set_class_and_name(
&sk->sk_callback_lock,
af_kern_callback_keys + sk->sk_family,
af_family_kern_clock_key_strings[sk->sk_family]);
else
lockdep_set_class_and_name(
&sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
af_family_clock_key_strings[sk->sk_family]);
@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
for (i = 0; i < hc->tx_seqbufc; i++)
kfree(hc->tx_seqbuf[i]);
hc->tx_seqbufc = 0;
dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}

static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)

@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
switch (type) {
case ICMP_REDIRECT:
dccp_do_redirect(skb, sk);
if (!sock_owned_by_user(sk))
dccp_do_redirect(skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */

@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);

if (type == NDISC_REDIRECT) {
struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
if (!sock_owned_by_user(sk)) {
struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

if (dst)
dst->ops->redirect(dst, sk, skb);
if (dst)
dst->ops->redirect(dst, sk, skb);
}
goto out;
}
@@ -142,6 +142,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
struct dccp_request_sock *dreq = dccp_rsk(req);
bool own_req;

/* TCP/DCCP listeners became lockless.
* DCCP stores complex state in its request_sock, so we need
* a protection for them, now this code runs without being protected
* by the parent (listener) lock.
*/
spin_lock_bh(&dreq->dreq_lock);

/* Check for retransmitted REQUEST */
if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {

@@ -156,7 +163,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
inet_rtx_syn_ack(sk, req);
}
/* Network Duplicate, discard packet */
return NULL;
goto out;
}

DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

@@ -182,20 +189,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
req, &own_req);
if (!child)
goto listen_overflow;
if (child) {
child = inet_csk_complete_hashdance(sk, child, req, own_req);
goto out;
}

return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
dccp_pr_debug("listen_overflow!\n");
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
req->rsk_ops->send_reset(sk, skb);

inet_csk_reqsk_queue_drop(sk, req);
return NULL;
out:
spin_unlock_bh(&dreq->dreq_lock);
return child;
}

EXPORT_SYMBOL_GPL(dccp_check_req);

@@ -246,6 +253,7 @@ int dccp_reqsk_init(struct request_sock *req,
{
struct dccp_request_sock *dreq = dccp_rsk(req);

spin_lock_init(&dreq->dreq_lock);
inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
inet_rsk(req)->acked = 0;
@@ -1070,7 +1070,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
return skb == NULL ? ERR_PTR(err) : skb;
}

static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sock *sk = sock->sk, *newsk;
struct sk_buff *skb = NULL;

@@ -1099,7 +1100,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
cb = DN_SKB_CB(skb);
sk->sk_ack_backlog--;
newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0);
newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
if (newsk == NULL) {
release_sock(sk);
kfree_skb(skb);

@@ -689,11 +689,12 @@ EXPORT_SYMBOL(inet_stream_connect);
* Accept a pending connection. The TCP layer now gives BSD semantics.
*/

int inet_accept(struct socket *sock, struct socket *newsock, int flags)
int inet_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sock *sk1 = sock->sk;
int err = -EINVAL;
struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);
struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);

if (!sk2)
goto do_err;

@@ -1487,8 +1488,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
int proto = iph->protocol;
int err = -ENOSYS;

if (skb->encapsulation)
if (skb->encapsulation) {
skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
skb_set_inner_network_header(skb, nhoff);
}

csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
@@ -424,7 +424,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
/*
* This will accept the next outstanding connection.
*/
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;

@@ -966,7 +966,7 @@ static int __ip_append_data(struct sock *sk,
cork->length += length;
if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
(rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
@@ -279,10 +279,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
*/
void tcp_v4_mtu_reduced(struct sock *sk)
{
struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
u32 mtu = tcp_sk(sk)->mtu_info;
struct dst_entry *dst;
u32 mtu;

if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
return;
mtu = tcp_sk(sk)->mtu_info;
dst = inet_csk_update_pmtu(sk, mtu);
if (!dst)
return;

@@ -428,7 +431,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
switch (type) {
case ICMP_REDIRECT:
do_redirect(icmp_skb, sk);
if (!sock_owned_by_user(sk))
do_redirect(icmp_skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */

@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk)
sk_mem_reclaim_partial(sk);

if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
goto out;

if (time_after(icsk->icsk_ack.timeout, jiffies)) {

@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
int event;

if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
!icsk->icsk_pending)
goto out;

if (time_after(icsk->icsk_timeout, jiffies)) {
@@ -920,12 +920,12 @@ static int __init inet6_init(void)
err = register_pernet_subsys(&inet6_net_ops);
if (err)
goto register_pernet_fail;
err = icmpv6_init();
if (err)
goto icmp_fail;
err = ip6_mr_init();
if (err)
goto ipmr_fail;
err = icmpv6_init();
if (err)
goto icmp_fail;
err = ndisc_init();
if (err)
goto ndisc_fail;

@@ -1061,10 +1061,10 @@ static int __init inet6_init(void)
ndisc_cleanup();
ndisc_fail:
ip6_mr_cleanup();
ipmr_fail:
icmpv6_cleanup();
icmp_fail:
unregister_pernet_subsys(&inet6_net_ops);
ipmr_fail:
icmpv6_cleanup();
register_pernet_fail:
sock_unregister(PF_INET6);
rtnl_unregister_all(PF_INET6);
Some files were not shown because too many files have changed in this diff.