Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Handle init flow failures properly in iwlwifi driver, from Shahar S Matityahu.

 2) mac80211 TXQs need to be unscheduled on powersave start, from Felix Fietkau.

 3) SKB memory accounting fix in A-MSDU aggregation, from Felix Fietkau.

 4) Increase RCU lock hold time in mlx5 FPGA code, from Saeed Mahameed.

 5) Avoid checksum complete with XDP in mlx5, also from Saeed.

 6) Fix netdev feature clobbering in ibmvnic driver, from Thomas Falcon.

 7) Partial sent TLS record leak fix from Jakub Kicinski.

 8) Reject zero size iova range in vhost, from Jason Wang.

 9) Allow pending work to complete before clcsock release, from Karsten Graul.

10) Fix XDP handling of max MTU in thunderx, from Matteo Croce.

11) A lot of protocols look at the sa_family field of a sockaddr before validating that its length is large enough, from Tetsuo Handa.

12) Don't write to a freed pointer in the qede ptp error path, from Colin Ian King.

13) Have to recompile IP options in ipv4_link_failure because it can be invoked from ARP, from Stephen Suryaputra.

14) Doorbell handling fixes in qed, from Denis Bolotin.

15) Revert net-sysfs kobject register leak fix, it causes new problems. From Wang Hai.

16) Spectre v1 fix in ATM code, from Gustavo A. R. Silva.

17) Fix put of BROPT_VLAN_STATS_PER_PORT in bridging code, from Nikolay Aleksandrov.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (111 commits)
  socket: fix compat SO_RCVTIMEO_NEW/SO_SNDTIMEO_NEW
  tcp: tcp_grow_window() needs to respect tcp_space()
  ocelot: Clean up stats update deferred work
  ocelot: Don't sleep in atomic context (irqs_disabled())
  net: bridge: fix netlink export of vlan_stats_per_port option
  qed: fix spelling mistake "faspath" -> "fastpath"
  tipc: set sysctl_tipc_rmem and named_timeout right range
  tipc: fix link established but not in session
  net: Fix missing meta data in skb with vlan packet
  net: atm: Fix potential Spectre v1 vulnerabilities
  net/core: work around section mismatch warning for ptp_classifier
  net: bridge: fix per-port af_packet sockets
  bnx2x: fix spelling mistake "dicline" -> "decline"
  route: Avoid crash from dereferencing NULL rt->from
  MAINTAINERS: normalize Woojung Huh's email address
  bonding: fix event handling for stacked bonds
  Revert "net-sysfs: Fix memory leak in netdev_register_kobject"
  rtnetlink: fix rtnl_valid_stats_req() nlmsg_len check
  qed: Fix the DORQ's attentions handling
  qed: Fix missing DORQ attentions
  ...
commit 2a3a028fc6
@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
 (*) Check call still alive.

-	u32 rxrpc_kernel_check_life(struct socket *sock,
-				    struct rxrpc_call *call);
+	bool rxrpc_kernel_check_life(struct socket *sock,
+				     struct rxrpc_call *call,
+				     u32 *_life);
+	void rxrpc_kernel_probe_life(struct socket *sock,
+				     struct rxrpc_call *call);

-    The first function returns a number that is updated when ACKs are received
-    from the peer (notably including PING RESPONSE ACKs which we can elicit by
-    sending PING ACKs to see if the call still exists on the server).  The
-    caller should compare the numbers of two calls to see if the call is still
-    alive after waiting for a suitable interval.
+    The first function passes back in *_life a number that is updated when
+    ACKs are received from the peer (notably including PING RESPONSE ACKs
+    which we can elicit by sending PING ACKs to see if the call still exists
+    on the server).  The caller should compare the numbers of two calls to see
+    if the call is still alive after waiting for a suitable interval.  It also
+    returns true as long as the call hasn't yet reached the completed state.

     This allows the caller to work out if the server is still contactable and
     if the call is still alive on the server while waiting for the server to
     process a client operation.
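To make the documented API change concrete, here is a minimal usage sketch for a kernel-side rxrpc caller. The wrapper name, the one-second wait and the aliveness heuristic are illustrative only; only the two function signatures come from the patch above.

	#include <linux/delay.h>
	#include <net/af_rxrpc.h>

	/* Hypothetical helper: probe the server, then see whether the
	 * ACK life counter moved after a suitable interval.
	 */
	static bool my_call_still_alive(struct socket *sock,
					struct rxrpc_call *call)
	{
		u32 life_before, life_after;

		/* false once the call reached the completed state */
		if (!rxrpc_kernel_check_life(sock, call, &life_before))
			return false;

		rxrpc_kernel_probe_life(sock, call); /* elicit a PING RESPONSE ACK */
		msleep(1000);                        /* wait a suitable interval */

		if (!rxrpc_kernel_check_life(sock, call, &life_after))
			return false;

		/* the counter advances when ACKs arrive from the peer */
		return life_after != life_before;
	}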
@@ -10145,7 +10145,7 @@ F:	drivers/spi/spi-at91-usart.c
 F:	Documentation/devicetree/bindings/mfd/atmel-usart.txt

 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
-M:	Woojung Huh <Woojung.Huh@microchip.com>
+M:	Woojung Huh <woojung.huh@microchip.com>
 M:	Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	struct sock *sk = sock->sk;
 	int err = 0;

-	if (!maddr || maddr->family != AF_ISDN)
+	if (addr_len < sizeof(struct sockaddr_mISDN))
 		return -EINVAL;

-	if (addr_len < sizeof(struct sockaddr_mISDN))
+	if (!maddr || maddr->family != AF_ISDN)
 		return -EINVAL;

 	lock_sock(sk);
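The swap above is an instance of the sockaddr-validation rule called out in item 11 of the merge message: check the caller-supplied length before reading any field of the sockaddr. A generic sketch of the safe ordering (names illustrative, not the driver code):

	static int example_bind_check(const struct sockaddr *addr, int addr_len)
	{
		const struct sockaddr_mISDN *maddr =
			(const struct sockaddr_mISDN *)addr;

		/* Length first: guarantees every field below is in bounds... */
		if (addr_len < sizeof(*maddr))
			return -EINVAL;

		/* ...only then is it safe to look at the address family */
		if (!maddr || maddr->family != AF_ISDN)
			return -EINVAL;

		return 0;
	}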
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this,
 		return NOTIFY_DONE;

 	if (event_dev->flags & IFF_MASTER) {
+		int ret;
+
 		netdev_dbg(event_dev, "IFF_MASTER\n");
-		return bond_master_netdev_event(event, event_dev);
+		ret = bond_master_netdev_event(event, event_dev);
+		if (ret != NOTIFY_DONE)
+			return ret;
 	}

 	if (event_dev->flags & IFF_SLAVE) {
@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
 	bnx2x_sample_bulletin(bp);

 	if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
-		BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+		BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
 		rc = -EINVAL;
 		goto out;
 	}
@@ -32,6 +32,13 @@
 #define DRV_NAME	"nicvf"
 #define DRV_VERSION	"1.0"

+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14 byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU	(1530 - ETH_HLEN - VLAN_HLEN * 2)
+
 /* Supported devices */
 static const struct pci_device_id nicvf_id_table[] = {
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
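As a sanity check of the definition above, the arithmetic with the standard kernel header values (shown for illustration):

	/* ETH_HLEN == 14, VLAN_HLEN == 4, so:
	 *
	 *	MAX_XDP_MTU = 1530 - 14 - 4 * 2 = 1508
	 *
	 * i.e. the largest MTU for which even a QinQ-tagged frame still
	 * fits in the 1530 bytes the hardware can keep contiguous for XDP.
	 */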
@@ -1582,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
 	struct nicvf *nic = netdev_priv(netdev);
 	int orig_mtu = netdev->mtu;

+	/* For now just support only the usual MTU sized frames,
+	 * plus some headroom for VLAN, QinQ.
+	 */
+	if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+			    netdev->mtu);
+		return -EINVAL;
+	}
+
 	netdev->mtu = new_mtu;

 	if (!netif_running(netdev))
@@ -1830,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
 	bool bpf_attached = false;
 	int ret = 0;

-	/* For now just support only the usual MTU sized frames */
-	if (prog && (dev->mtu > 1500)) {
+	/* For now just support only the usual MTU sized frames,
+	 * plus some headroom for VLAN, QinQ.
+	 */
+	if (prog && dev->mtu > MAX_XDP_MTU) {
 		netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
 			    dev->mtu);
 		return -EOPNOTSUPP;
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 	int ret;

 	if (enable) {
-		ret = clk_prepare_enable(fep->clk_ahb);
-		if (ret)
-			return ret;
-
 		ret = clk_prepare_enable(fep->clk_enet_out);
 		if (ret)
-			goto failed_clk_enet_out;
+			return ret;

 		if (fep->clk_ptp) {
 			mutex_lock(&fep->ptp_clk_mutex);
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)

 		phy_reset_after_clk_enable(ndev->phydev);
 	} else {
-		clk_disable_unprepare(fep->clk_ahb);
 		clk_disable_unprepare(fep->clk_enet_out);
 		if (fep->clk_ptp) {
 			mutex_lock(&fep->ptp_clk_mutex);
@@ -1885,8 +1880,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 failed_clk_ptp:
 	if (fep->clk_enet_out)
 		clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-	clk_disable_unprepare(fep->clk_ahb);

 	return ret;
 }
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev)
 	ret = clk_prepare_enable(fep->clk_ipg);
 	if (ret)
 		goto failed_clk_ipg;
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		goto failed_clk_ahb;

 	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
 	if (!IS_ERR(fep->reg_phy)) {
@@ -3563,6 +3559,9 @@ fec_probe(struct platform_device *pdev)
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 failed_regulator:
+	clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
 	clk_disable_unprepare(fep->clk_ipg);
 failed_clk_ipg:
 	fec_enet_clk_enable(ndev, false);
 failed_clk:
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);

+	clk_disable_unprepare(fep->clk_ahb);
 	clk_disable_unprepare(fep->clk_ipg);

 	return 0;
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	int ret;

-	return clk_prepare_enable(fep->clk_ipg);
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+
+	return 0;
+
+failed_clk_ipg:
+	clk_disable_unprepare(fep->clk_ahb);
+	return ret;
 }

 static const struct dev_pm_ops fec_pm_ops = {
@@ -3762,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
 {
 	struct device *dev = &adapter->vdev->dev;
 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+	netdev_features_t old_hw_features = 0;
 	union ibmvnic_crq crq;
 	int i;

@@ -3837,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
 	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
 	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

-	adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
+	if (adapter->state != VNIC_PROBING) {
+		old_hw_features = adapter->netdev->hw_features;
+		adapter->netdev->hw_features = 0;
+	}
+
+	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
-		adapter->netdev->features |= NETIF_F_IP_CSUM;
+		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
-		adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

 	if ((adapter->netdev->features &
 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
-		adapter->netdev->features |= NETIF_F_RXCSUM;
+		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

 	if (buf->large_tx_ipv4)
-		adapter->netdev->features |= NETIF_F_TSO;
+		adapter->netdev->hw_features |= NETIF_F_TSO;
 	if (buf->large_tx_ipv6)
-		adapter->netdev->features |= NETIF_F_TSO6;
+		adapter->netdev->hw_features |= NETIF_F_TSO6;

-	adapter->netdev->hw_features |= adapter->netdev->features;
+	if (adapter->state == VNIC_PROBING) {
+		adapter->netdev->features |= adapter->netdev->hw_features;
+	} else if (old_hw_features != adapter->netdev->hw_features) {
+		netdev_features_t tmp = 0;
+
+		/* disable features no longer supported */
+		adapter->netdev->features &= adapter->netdev->hw_features;
+		/* turn on features now supported if previously enabled */
+		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+			adapter->netdev->hw_features;
+		adapter->netdev->features |=
+				tmp & adapter->netdev->wanted_features;
+	}

 	memset(&crq, 0, sizeof(crq));
 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
@@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
  * switching channels
  */
 typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
 			       struct mlx5e_channels *new_chs,
 			       mlx5e_fp_hw_modify hw_modify);
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)

 static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
 {
-	int err;
+	int err = 0;

 	rtnl_lock();
 	mutex_lock(&priv->state_lock);
-	mlx5e_close_locked(priv->netdev);
-	err = mlx5e_open_locked(priv->netdev);
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		goto out;
+
+	err = mlx5e_safe_reopen_channels(priv);
+
+out:
 	mutex_unlock(&priv->state_lock);
 	rtnl_unlock();

@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
 			return -EOPNOTSUPP;
 	}

+	if (!(mlx5e_eswitch_rep(*out_dev) &&
+	      mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+		return -EOPNOTSUPP;
+
 	return 0;
 }

@@ -1768,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
 	struct mlx5e_channel *c;
 	int i;

-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+	    priv->channels.params.xdp_prog)
 		return 0;

 	for (i = 0; i < channels->num; i++) {
@@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	if (params->rx_dim_enabled)
 		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

-	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
+	/* We disable csum_complete when XDP is enabled since
+	 * XDP programs might manipulate packets which will render
+	 * skb->checksum incorrect.
+	 */
+	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
 		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

 	return 0;
@@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
 	return 0;
 }

+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
+{
+	struct mlx5e_channels new_channels = {};
+
+	new_channels.params = priv->channels.params;
+	return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+}
+
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
 	priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
@@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
 	if (!report_failed)
 		goto unlock;

-	mlx5e_close_locked(priv->netdev);
-	err = mlx5e_open_locked(priv->netdev);
+	err = mlx5e_safe_reopen_channels(priv);
 	if (err)
 		netdev_err(priv->netdev,
-			   "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+			   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
 			   err);

 unlock:
@@ -4553,7 +4564,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
 {
 	enum mlx5e_traffic_types tt;

-	rss_params->hfunc = ETH_RSS_HASH_XOR;
+	rss_params->hfunc = ETH_RSS_HASH_TOP;
 	netdev_rss_key_fill(rss_params->toeplitz_hash_key,
 			    sizeof(rss_params->toeplitz_hash_key));
 	mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
 {
 	*proto = ((struct ethhdr *)skb->data)->h_proto;
 	*proto = __vlan_get_protocol(skb, *proto, network_depth);
-	return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+
+	if (*proto == htons(ETH_P_IP))
+		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+	if (*proto == htons(ETH_P_IPV6))
+		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+	return false;
 }

 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
 	rq->stats->ecn_mark += !!rc;
 }

-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
-{
-	const void *fcs_bytes;
-	u32 _fcs_bytes;
-
-	fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
-				       ETH_FCS_LEN, &_fcs_bytes);
-
-	return __get_unaligned_cpu32(fcs_bytes);
-}
-
 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 {
 	void *ip_p = skb->data + network_depth;
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)

 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+		       struct mlx5e_rq_stats *stats)
+{
+	stats->csum_complete_tail_slow++;
+	skb->csum = csum_block_add(skb->csum,
+				   skb_checksum(skb, offset, len, 0),
+				   offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+		  struct mlx5e_rq_stats *stats)
+{
+	u8 tail_padding[MAX_PADDING];
+	int len = skb->len - offset;
+	void *tail;
+
+	if (unlikely(len > MAX_PADDING)) {
+		tail_padding_csum_slow(skb, offset, len, stats);
+		return;
+	}
+
+	tail = skb_header_pointer(skb, offset, len, tail_padding);
+	if (unlikely(!tail)) {
+		tail_padding_csum_slow(skb, offset, len, stats);
+		return;
+	}
+
+	stats->csum_complete_tail++;
+	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+		       struct mlx5e_rq_stats *stats)
+{
+	struct ipv6hdr *ip6;
+	struct iphdr   *ip4;
+	int pkt_len;
+
+	switch (proto) {
+	case htons(ETH_P_IP):
+		ip4 = (struct iphdr *)(skb->data + network_depth);
+		pkt_len = network_depth + ntohs(ip4->tot_len);
+		break;
+	case htons(ETH_P_IPV6):
+		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+		break;
+	default:
+		return;
+	}
+
+	if (likely(pkt_len >= skb->len))
+		return;
+
+	tail_padding_csum(skb, pkt_len, stats);
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct mlx5_cqe64 *cqe,
 				     struct mlx5e_rq *rq,
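A worked example of the padding detection above, with illustrative numbers (not from the patch):

	/* A 40-byte IPv4 datagram arrives in a minimum-size 60-byte frame:
	 *
	 *	network_depth = 14			(ethernet header)
	 *	pkt_len       = 14 + 40 = 54
	 *	skb->len      = 60			(frame padded by 6 bytes)
	 *
	 * pkt_len < skb->len, so tail_padding_csum() folds the checksum
	 * of the 6 padding bytes into skb->csum via csum_block_add() at
	 * offset 54, keeping CHECKSUM_COMPLETE consistent with what the
	 * NIC summed over the whole frame.  A 46-byte datagram would give
	 * pkt_len == skb->len == 60: no padding, nothing to do.
	 */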
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		return;
 	}

-	if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+	/* True when explicitly set via priv flag, or XDP prog is loaded */
+	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
 		goto csum_unnecessary;

 	/* CQE csum doesn't cover padding octets in short ethernet
|
|||
skb->csum = csum_partial(skb->data + ETH_HLEN,
|
||||
network_depth - ETH_HLEN,
|
||||
skb->csum);
|
||||
if (unlikely(netdev->features & NETIF_F_RXFCS))
|
||||
skb->csum = csum_block_add(skb->csum,
|
||||
(__force __wsum)mlx5e_get_fcs(skb),
|
||||
skb->len - ETH_FCS_LEN);
|
||||
|
||||
mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
|
||||
stats->csum_complete++;
|
||||
return;
|
||||
}
|
||||
|
||||
csum_unnecessary:
|
||||
if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
|
||||
((cqe->hds_ip_ext & CQE_L4_OK) ||
|
||||
(get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
|
||||
(cqe->hds_ip_ext & CQE_L4_OK))) {
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
if (cqe_is_tunneled(cqe)) {
|
||||
skb->csum_level = 1;
|
||||
|
|
|
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
 		s->rx_csum_none	+= rq_stats->csum_none;
 		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
 		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
 		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 		s->rx_xdp_drop += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
 	u64 rx_csum_unnecessary;
 	u64 rx_csum_none;
 	u64 rx_csum_complete;
+	u64 rx_csum_complete_tail;
+	u64 rx_csum_complete_tail_slow;
 	u64 rx_csum_unnecessary_inner;
 	u64 rx_xdp_drop;
 	u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
 	u64 packets;
 	u64 bytes;
 	u64 csum_complete;
+	u64 csum_complete_tail;
+	u64 csum_complete_tail_slow;
 	u64 csum_unnecessary;
 	u64 csum_unnecessary_inner;
 	u64 csum_none;
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
 	return ret;
 }

-static void mlx5_fpga_tls_release_swid(struct idr *idr,
-				       spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+					spinlock_t *idr_spinlock, u32 swid)
 {
 	unsigned long flags;
+	void *ptr;

 	spin_lock_irqsave(idr_spinlock, flags);
-	idr_remove(idr, swid);
+	ptr = idr_remove(idr, swid);
 	spin_unlock_irqrestore(idr_spinlock, flags);
+	return ptr;
 }

 static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
 	kfree(buf);
 }

-struct mlx5_teardown_stream_context {
-	struct mlx5_fpga_tls_command_context cmd;
-	u32 swid;
-};
-
 static void
 mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
 				  struct mlx5_fpga_device *fdev,
 				  struct mlx5_fpga_tls_command_context *cmd,
 				  struct mlx5_fpga_dma_buf *resp)
 {
-	struct mlx5_teardown_stream_context *ctx =
-		container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
 	if (resp) {
 		u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);

@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
 			mlx5_fpga_err(fdev,
 				      "Teardown stream failed with syndrome = %d",
 				      syndrome);
-		else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
-			mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
-						   &fdev->tls->tx_idr_spinlock,
-						   ctx->swid);
-		else
-			mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
-						   &fdev->tls->rx_idr_spinlock,
-						   ctx->swid);
 	}
 	mlx5_fpga_tls_put_command_ctx(cmd);
 }
@@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 	void *cmd;
 	int ret;

-	rcu_read_lock();
-	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-	rcu_read_unlock();
-
-	if (!flow) {
-		WARN_ONCE(1, "Received NULL pointer for handle\n");
-		return -EINVAL;
-	}
-
 	buf = kzalloc(size, GFP_ATOMIC);
 	if (!buf)
 		return -ENOMEM;

 	cmd = (buf + 1);

+	rcu_read_lock();
+	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+	if (unlikely(!flow)) {
+		rcu_read_unlock();
+		WARN_ONCE(1, "Received NULL pointer for handle\n");
+		kfree(buf);
+		return -EINVAL;
+	}
 	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+	rcu_read_unlock();

 	MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
 	MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 					    void *flow, u32 swid, gfp_t flags)
 {
-	struct mlx5_teardown_stream_context *ctx;
+	struct mlx5_fpga_tls_command_context *ctx;
 	struct mlx5_fpga_dma_buf *buf;
 	void *cmd;

@@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 	if (!ctx)
 		return;

-	buf = &ctx->cmd.buf;
+	buf = &ctx->buf;
 	cmd = (ctx + 1);
 	MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
 	MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 	buf->sg[0].data = cmd;
 	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;

-	ctx->swid = swid;
-	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+	mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
 			       mlx5_fpga_tls_teardown_completion);
 }

@@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
 	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
 	void *flow;

-	rcu_read_lock();
 	if (direction_sx)
-		flow = idr_find(&tls->tx_idr, swid);
+		flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+						  &tls->tx_idr_spinlock,
+						  swid);
 	else
-		flow = idr_find(&tls->rx_idr, swid);
+		flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+						  &tls->rx_idr_spinlock,
+						  swid);

-	rcu_read_unlock();
 	if (!flow) {
 		mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
 		return;
 	}

+	synchronize_rcu(); /* before kfree(flow) */
 	mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
 }

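The del_flow change follows the classic RCU deletion recipe: unpublish, wait for readers, then free. A minimal sketch under stated assumptions (a plain idr protected by a spinlock, lookups done under rcu_read_lock(); names illustrative):

	static DEFINE_SPINLOCK(example_idr_lock);

	static void example_del(struct idr *idr, u32 id)
	{
		void *obj;

		spin_lock(&example_idr_lock);
		obj = idr_remove(idr, id);	/* unpublish: new lookups fail */
		spin_unlock(&example_idr_lock);

		if (!obj)
			return;

		synchronize_rcu();	/* wait out rcu_read_lock() readers */
		kfree(obj);		/* safe: no reader can still see obj */
	}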
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
 		return 0;

-	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
 	if (!emad_wq)
 		return -ENOMEM;
 	mlxsw_core->emad_wq = emad_wq;
@@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void)
 {
 	int err;

-	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
 	if (!mlxsw_wq)
 		return -ENOMEM;
-	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
 					    mlxsw_core_driver_name);
 	if (!mlxsw_owq) {
 		err = -ENOMEM;
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
 	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
 	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
 	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
+	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
 };

 #define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
 };

 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
 	MLXSW_SP_SB_CM(0, 7, 4),
 	MLXSW_SP_SB_CM(0, 7, 4),
 	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
 	MLXSW_SP_SB_CM(1, 0xff, 4),
 };

@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
 	MLXSW_SP_SB_PM(0, 0),
 	MLXSW_SP_SB_PM(0, 0),
 	MLXSW_SP_SB_PM(0, 0),
+	MLXSW_SP_SB_PM(10000, 90000),
 };

 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
 	/* A RIF is not created for macvlan netdevs. Their MAC is used to
 	 * populate the FDB
 	 */
-	if (netif_is_macvlan(dev))
+	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
 		return 0;

 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
 	u16 fid_index;
 	int err = 0;

-	if (switchdev_trans_ph_prepare(trans))
+	if (switchdev_trans_ph_commit(trans))
 		return 0;

 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
 			      struct netdev_hw_addr *hw_addr)
 {
 	struct ocelot *ocelot = port->ocelot;
-	struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+	struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);

 	if (!ha)
 		return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 		       ETH_GSTRING_LEN);
 }

-static void ocelot_check_stats(struct work_struct *work)
+static void ocelot_update_stats(struct ocelot *ocelot)
 {
-	struct delayed_work *del_work = to_delayed_work(work);
-	struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
 	int i, j;

 	mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
 		}
 	}

-	cancel_delayed_work(&ocelot->stats_work);
+	mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+	struct delayed_work *del_work = to_delayed_work(work);
+	struct ocelot *ocelot = container_of(del_work, struct ocelot,
+					     stats_work);
+
+	ocelot_update_stats(ocelot);
+
 	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
 			   OCELOT_STATS_CHECK_DELAY);
-
-	mutex_unlock(&ocelot->stats_lock);
 }

 static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
 	int i;

 	/* check and update now */
-	ocelot_check_stats(&ocelot->stats_work.work);
+	ocelot_update_stats(ocelot);

 	/* Copy all counters */
 	for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
 				 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
 				 ANA_CPUQ_8021_CFG, i);

-	INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+	INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
 	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
 			   OCELOT_STATS_CHECK_DELAY);
 	return 0;
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
 						dma_object->addr))) {
 			vxge_os_dma_free(devh->pdev, memblock,
 					 &dma_object->acc_handle);
+			memblock = NULL;
 			goto exit;
 		}

@@ -431,12 +431,16 @@ struct qed_qm_info {
 	u8 num_pf_rls;
 };

+#define QED_OVERFLOW_BIT	1
+
 struct qed_db_recovery_info {
 	struct list_head list;

 	/* Lock to protect the doorbell recovery mechanism list */
 	spinlock_t lock;
+	bool dorq_attn;
 	u32 db_recovery_counter;
+	unsigned long overflow;
 };

 struct storm_stats {
|
|||
|
||||
/* doorbell recovery mechanism */
|
||||
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
|
||||
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
|
||||
enum qed_db_rec_exec db_exec);
|
||||
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
|
||||
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
|
||||
|
||||
/* Other Linux specific common definitions */
|
||||
|
|
|
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,

 /* Doorbell address sanity (address within doorbell bar range) */
 static bool qed_db_rec_sanity(struct qed_dev *cdev,
-			      void __iomem *db_addr, void *db_data)
+			      void __iomem *db_addr,
+			      enum qed_db_rec_width db_width,
+			      void *db_data)
 {
+	u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
 	/* Make sure doorbell address is within the doorbell bar */
 	if (db_addr < cdev->doorbells ||
-	    (u8 __iomem *)db_addr >
+	    (u8 __iomem *)db_addr + width >
 	    (u8 __iomem *)cdev->doorbells + cdev->db_size) {
 		WARN(true,
 		     "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
 	}

 	/* Sanitize doorbell address */
-	if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+	if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
 		return -EINVAL;

 	/* Obtain hwfn from doorbell address */
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
 		return 0;
 	}

-	/* Sanitize doorbell address */
-	if (!qed_db_rec_sanity(cdev, db_addr, db_data))
-		return -EINVAL;
-
 	/* Obtain hwfn from doorbell address */
 	p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);

@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)

 /* Ring the doorbell of a single doorbell recovery entry */
 static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
-				 struct qed_db_recovery_entry *db_entry,
-				 enum qed_db_rec_exec db_exec)
+				 struct qed_db_recovery_entry *db_entry)
 {
-	if (db_exec != DB_REC_ONCE) {
-		/* Print according to width */
-		if (db_entry->db_width == DB_REC_WIDTH_32B) {
-			DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-				   "%s doorbell address %p data %x\n",
-				   db_exec == DB_REC_DRY_RUN ?
-				   "would have rung" : "ringing",
-				   db_entry->db_addr,
-				   *(u32 *)db_entry->db_data);
-		} else {
-			DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-				   "%s doorbell address %p data %llx\n",
-				   db_exec == DB_REC_DRY_RUN ?
-				   "would have rung" : "ringing",
-				   db_entry->db_addr,
-				   *(u64 *)(db_entry->db_data));
-		}
+	/* Print according to width */
+	if (db_entry->db_width == DB_REC_WIDTH_32B) {
+		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+			   "ringing doorbell address %p data %x\n",
+			   db_entry->db_addr,
+			   *(u32 *)db_entry->db_data);
+	} else {
+		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+			   "ringing doorbell address %p data %llx\n",
+			   db_entry->db_addr,
+			   *(u64 *)(db_entry->db_data));
 	}

 	/* Sanity */
 	if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
-			       db_entry->db_data))
+			       db_entry->db_width, db_entry->db_data))
 		return;

 	/* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
 	wmb();

 	/* Ring the doorbell */
-	if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
-		if (db_entry->db_width == DB_REC_WIDTH_32B)
-			DIRECT_REG_WR(db_entry->db_addr,
-				      *(u32 *)(db_entry->db_data));
-		else
-			DIRECT_REG_WR64(db_entry->db_addr,
-					*(u64 *)(db_entry->db_data));
-	}
+	if (db_entry->db_width == DB_REC_WIDTH_32B)
+		DIRECT_REG_WR(db_entry->db_addr,
+			      *(u32 *)(db_entry->db_data));
+	else
+		DIRECT_REG_WR64(db_entry->db_addr,
+				*(u64 *)(db_entry->db_data));

 	/* Flush the write combined buffer. Next doorbell may come from a
 	 * different entity to the same address...
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
 }

 /* Traverse the doorbell recovery entry list and ring all the doorbells */
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-			     enum qed_db_rec_exec db_exec)
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
 {
 	struct qed_db_recovery_entry *db_entry = NULL;

-	if (db_exec != DB_REC_ONCE) {
-		DP_NOTICE(p_hwfn,
-			  "Executing doorbell recovery. Counter was %d\n",
-			  p_hwfn->db_recovery_info.db_recovery_counter);
+	DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+		  p_hwfn->db_recovery_info.db_recovery_counter);

-		/* Track amount of times recovery was executed */
-		p_hwfn->db_recovery_info.db_recovery_counter++;
-	}
+	/* Track amount of times recovery was executed */
+	p_hwfn->db_recovery_info.db_recovery_counter++;

 	/* Protect the list */
 	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
 	list_for_each_entry(db_entry,
-			    &p_hwfn->db_recovery_info.list, list_entry) {
-		qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
-		if (db_exec == DB_REC_ONCE)
-			break;
-	}
-
+			    &p_hwfn->db_recovery_info.list, list_entry)
+		qed_db_recovery_ring(p_hwfn, db_entry);
 	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
 }

@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
 	u32 count = QED_DB_REC_COUNT;
 	u32 usage = 1;

+	/* Flush any pending (e)dpms as they may never arrive */
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
 	/* wait for usage to zero or count to run out. This is necessary since
 	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
 	 * can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@

 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 overflow;
+	u32 attn_ovfl, cur_ovfl;
 	int rc;

-	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
-	DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
-	if (!overflow) {
-		qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+				       &p_hwfn->db_recovery_info.overflow);
+	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+	if (!cur_ovfl && !attn_ovfl)
 		return 0;
-	}

-	if (qed_edpm_enabled(p_hwfn)) {
+	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+		  attn_ovfl, cur_ovfl);
+
+	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
 		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
 		if (rc)
 			return rc;
 	}

-	/* Flush any pending (e)dpm as they may never arrive */
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
 	/* Release overflow sticky indication (stop silently dropping everything) */
 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

 	/* Repeat all last doorbells (doorbell drop recovery) */
-	qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+	qed_db_recovery_execute(p_hwfn);

 	return 0;
 }

-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
 {
-	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
 	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+	u32 overflow;
 	int rc;

-	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
-	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+	if (!overflow)
+		goto out;
+
+	/* Run PF doorbell recovery in next periodic handler */
+	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+	if (!p_hwfn->db_bar_no_edpm) {
+		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+		if (rc)
+			goto out;
+	}
+
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+	/* Schedule the handler even if overflow was not detected */
+	qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;

 	/* int_sts may be zero since all PFs were interrupted for doorbell
 	 * overflow but another one already handled it. Can abort here. If
 	 * this PF also requires overflow recovery we will be interrupted again.
 	 * The masked almost full indication may also be set. Ignoring.
 	 */
+	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
+		return 0;
+
+	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);

 	/* check if db_drop or overflow happened */
 	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
 		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
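The QED_OVERFLOW_BIT handling above boils down to a defer-to-periodic pattern: the attention path only latches the event, and the periodic handler consumes it atomically. A minimal illustration (not the driver code; names hypothetical):

	static unsigned long example_overflow;

	/* Attention (interrupt) context: just latch the event cheaply */
	static void example_attn_cb(void)
	{
		set_bit(QED_OVERFLOW_BIT, &example_overflow);
	}

	/* Periodic context: consume the event exactly once */
	static void example_periodic_handler(void)
	{
		if (test_and_clear_bit(QED_OVERFLOW_BIT, &example_overflow))
			; /* run the expensive doorbell recovery here */
	}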
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
 			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
 			  first_drop_reason, all_drops_reason);

-		rc = qed_db_rec_handler(p_hwfn, p_ptt);
-		qed_periodic_db_rec_start(p_hwfn);
-		if (rc)
-			return rc;
-
 		/* Clear the doorbell drop details and prepare for next drop */
 		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
 	return -EINVAL;
 }

+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+	p_hwfn->db_recovery_info.dorq_attn = true;
+	qed_dorq_attn_overflow(p_hwfn);
+
+	return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+	if (p_hwfn->db_recovery_info.dorq_attn)
+		goto out;
+
+	/* Call DORQ callback if the attention was missed */
+	qed_dorq_attn_cb(p_hwfn);
+out:
+	p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
 /* Instead of major changes to the data-structure, we have a some 'special'
  * identifiers for sources that changed meaning between adapters.
  */
@@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
 			}
 		}

+	/* Handle missed DORQ attention */
+	qed_dorq_attn_handler(p_hwfn);
+
 	/* Clear IGU indication for the deasserted bits */
 	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
 		      GTT_BAR0_MAP_REG_IGU_CMD +
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);

 /**
  * @brief - Doorbell Recovery handler.
- *          Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- *          (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ *          Run doorbell recovery in case of PF overflow (and flush DORQ if
+ *          needed).
 *
 * @param p_hwfn
 * @param p_ptt
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 	}
 }

-#define QED_PERIODIC_DB_REC_COUNT		100
+#define QED_PERIODIC_DB_REC_COUNT		10
 #define QED_PERIODIC_DB_REC_INTERVAL_MS		100
 #define QED_PERIODIC_DB_REC_INTERVAL \
 	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 		p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
 	} else {
 		DP_INFO(p_hwfn,
-			"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+			"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
 			vf->abs_vf_id,
 			req->vfdev_info.eth_fp_hsi_major,
 			req->vfdev_info.eth_fp_hsi_minor,
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)

 	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
 	if (IS_ERR(ptp->clock)) {
-		rc = -EINVAL;
 		DP_ERR(edev, "PTP clock registration failed\n");
+		qede_ptp_disable(edev);
+		rc = -EINVAL;
 		goto err2;
 	}

 	return 0;

-err2:
-	qede_ptp_disable(edev);
-	ptp->clock = NULL;
 err1:
 	kfree(ptp);
+err2:
 	edev->ptp = NULL;

 	return rc;
@@ -1246,6 +1246,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 		goto err_option_port_add;
 	}

+	/* set promiscuity level to new slave */
+	if (dev->flags & IFF_PROMISC) {
+		err = dev_set_promiscuity(port_dev, 1);
+		if (err)
+			goto err_set_slave_promisc;
+	}
+
+	/* set allmulti level to new slave */
+	if (dev->flags & IFF_ALLMULTI) {
+		err = dev_set_allmulti(port_dev, 1);
+		if (err) {
+			if (dev->flags & IFF_PROMISC)
+				dev_set_promiscuity(port_dev, -1);
+			goto err_set_slave_promisc;
+		}
+	}
+
 	netif_addr_lock_bh(dev);
 	dev_uc_sync_multiple(port_dev, dev);
 	dev_mc_sync_multiple(port_dev, dev);
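dev_set_promiscuity() and dev_set_allmulti() are reference-counted (+1 takes a reference, -1 drops it), so the add and del paths must pair exactly. A sketch of the pairing this patch establishes (helper name illustrative, error handling elided):

	static void example_sync_port_flags(struct net_device *team_dev,
					    struct net_device *port_dev, int inc)
	{
		if (team_dev->flags & IFF_PROMISC)
			dev_set_promiscuity(port_dev, inc);
		if (team_dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(port_dev, inc);
	}

	/* called with inc = 1 from the port-add path, inc = -1 from port-del */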
@@ -1262,6 +1279,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,

 	return 0;

+err_set_slave_promisc:
+	__team_option_inst_del_port(team, port);
+
 err_option_port_add:
 	team_upper_dev_unlink(team, port);

@@ -1307,6 +1327,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)

 	team_port_disable(team, port);
 	list_del_rcu(&port->list);
+
+	if (dev->flags & IFF_PROMISC)
+		dev_set_promiscuity(port_dev, -1);
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(port_dev, -1);
+
 	team_upper_dev_unlink(team, port);
 	netdev_rx_handler_unregister(port_dev);
 	team_port_disable_netpoll(port);
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
 			num_msdus++;
 			num_bytes += ret;
 		}
-		ieee80211_return_txq(hw, txq);
+		ieee80211_return_txq(hw, txq, false);
 		ieee80211_txq_schedule_end(hw, txq->ac);

 		record->num_msdus = cpu_to_le16(num_msdus);
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
 			if (ret < 0)
 				break;
 		}
-		ieee80211_return_txq(hw, txq);
+		ieee80211_return_txq(hw, txq, false);
 		ath10k_htt_tx_txq_update(hw, txq);
 		if (ret == -EBUSY)
 			break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
 		if (ret < 0)
 			break;
 	}
-	ieee80211_return_txq(hw, txq);
+	ieee80211_return_txq(hw, txq, false);
 	ath10k_htt_tx_txq_update(hw, txq);
 out:
 	ieee80211_txq_schedule_end(hw, ac);
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 		goto out;

 	while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+		bool force;
+
 		tid = (struct ath_atx_tid *)queue->drv_priv;

 		ret = ath_tx_sched_aggr(sc, txq, tid);
 		ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);

-		ieee80211_return_txq(hw, queue);
+		force = !skb_queue_empty(&tid->retry_q);
+		ieee80211_return_txq(hw, queue, force);
 	}

 out:
@@ -82,6 +82,7 @@
 #define IWL_22000_HR_A0_FW_PRE		"iwlwifi-QuQnj-a0-hr-a0-"
 #define IWL_22000_SU_Z0_FW_PRE		"iwlwifi-su-z0-"
 #define IWL_QU_B_JF_B_FW_PRE		"iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QUZ_A_HR_B_FW_PRE		"iwlwifi-QuZ-a0-hr-b0-"
 #define IWL_QNJ_B_JF_B_FW_PRE		"iwlwifi-QuQnj-b0-jf-b0-"
 #define IWL_CC_A_FW_PRE			"iwlwifi-cc-a0-"
 #define IWL_22000_SO_A_JF_B_FW_PRE	"iwlwifi-so-a0-jf-b0-"
@@ -105,8 +106,8 @@
 	IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
 	IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
-#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
-	IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
+	IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
 	IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
@@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
 	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };

-const struct iwl_cfg iwl22260_2ax_cfg = {
-	.name = "Intel(R) Wireless-AX 22260",
+const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+	.name = "Intel(R) Wi-Fi 6 AX101",
+	.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax200_cfg_cc = {
+	.name = "Intel(R) Wi-Fi 6 AX200 160MHz",
 	.fw_name_pre = IWL_CC_A_FW_PRE,
 	IWL_DEVICE_22500,
 	/*
@@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = {
 };

 const struct iwl_cfg killer1650x_2ax_cfg = {
-	.name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)",
+	.name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
 	.fw_name_pre = IWL_CC_A_FW_PRE,
 	IWL_DEVICE_22500,
 	/*
@@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
 };

 const struct iwl_cfg killer1650w_2ax_cfg = {
-	.name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)",
+	.name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
 	.fw_name_pre = IWL_CC_A_FW_PRE,
 	IWL_DEVICE_22500,
 	/*
@@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
 };

 const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
-	.name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+	.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
 	.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
 	IWL_DEVICE_22500,
 	/*
@@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
 };

 const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
-	.name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+	.name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
 	.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
 	IWL_DEVICE_22500,
 	/*
@@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
 	if (!range) {
 		IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
 			le32_to_cpu(reg->region_id), type);
+		memset(*data, 0, le32_to_cpu((*data)->len));
 		return;
 	}

@@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
 		if (range_size < 0) {
 			IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
 				le32_to_cpu(reg->region_id), type);
+			memset(*data, 0, le32_to_cpu((*data)->len));
 			return;
 		}
 		range = range + range_size;
@@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,

 	trigger = fwrt->dump.active_trigs[id].trig;

-	size = sizeof(*dump_file);
-	size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
-
+	size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
 	if (!size)
 		return NULL;

+	size += sizeof(*dump_file);
+
 	dump_file = vzalloc(size);
 	if (!dump_file)
 		return NULL;
@@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
 	iwl_dump_error_desc->len = 0;

 	ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
-	if (ret) {
+	if (ret)
 		kfree(iwl_dump_error_desc);
-	} else {
-		set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-
-		/* trigger nmi to halt the fw */
-		iwl_force_nmi(fwrt->trans);
-	}
+	else
+		iwl_trans_sync_nmi(fwrt->trans);

 	return ret;
 }
@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
|
|||
|
||||
void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
/* if the wait event timeout elapses instead of wake up then
|
||||
* the driver did not receive NMI interrupt and can not assume the FW
|
||||
* is halted
|
||||
*/
|
||||
int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
|
||||
!test_bit(STATUS_FW_WAIT_DUMP,
|
||||
&fwrt->trans->status),
|
||||
msecs_to_jiffies(2000));
|
||||
if (!ret) {
|
||||
/* failed to receive NMI interrupt, assuming the FW is stuck */
|
||||
set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
|
||||
|
||||
clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
|
||||
}
|
||||
|
||||
/* Assuming the op mode mutex is held at this point */
|
||||
iwl_fw_dbg_collect_sync(fwrt);
|
||||
|
||||
iwl_trans_stop_device(fwrt->trans);
|
||||
|
|
|
@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
|
|||
fwrt->ops_ctx = ops_ctx;
|
||||
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
|
||||
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
|
||||
init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
|
||||
|
||||
|
|
|
@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
|
|||
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
|
||||
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
|
||||
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
|
||||
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
|
||||
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
|
||||
extern const struct iwl_cfg iwl22260_2ax_cfg;
|
||||
extern const struct iwl_cfg iwl_ax200_cfg_cc;
|
||||
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
|
||||
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
|
||||
extern const struct iwl_cfg killer1650x_2ax_cfg;
|
||||
|
|
|
@ -327,6 +327,7 @@ enum {
|
|||
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
|
||||
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
|
||||
#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
|
||||
#define CSR_HW_REV_TYPE_QUZ (0x0000354)
|
||||
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
|
||||
#define CSR_HW_REV_TYPE_SO (0x0000370)
|
||||
#define CSR_HW_REV_TYPE_TY (0x0000420)
|
||||
|
|
|
@ -338,7 +338,6 @@ enum iwl_d3_status {
|
|||
* are sent
|
||||
* @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
|
||||
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
|
||||
* @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
|
||||
*/
|
||||
enum iwl_trans_status {
|
||||
STATUS_SYNC_HCMD_ACTIVE,
|
||||
|
@ -351,7 +350,6 @@ enum iwl_trans_status {
|
|||
STATUS_TRANS_GOING_IDLE,
|
||||
STATUS_TRANS_IDLE,
|
||||
STATUS_TRANS_DEAD,
|
||||
STATUS_FW_WAIT_DUMP,
|
||||
};
|
||||
|
||||
static inline int
|
||||
|
@ -618,6 +616,7 @@ struct iwl_trans_ops {
|
|||
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
|
||||
u32 dump_mask);
|
||||
void (*debugfs_cleanup)(struct iwl_trans *trans);
|
||||
void (*sync_nmi)(struct iwl_trans *trans);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -831,7 +830,6 @@ struct iwl_trans {
|
|||
u32 lmac_error_event_table[2];
|
||||
u32 umac_error_event_table;
|
||||
unsigned int error_event_table_tlv_status;
|
||||
wait_queue_head_t fw_halt_waitq;
|
||||
|
||||
/* pointer to trans specific struct */
|
||||
/*Ensure that this pointer will always be aligned to sizeof pointer */
|
||||
|
@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
|
|||
/* prevent double restarts due to the same erroneous FW */
|
||||
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
|
||||
iwl_op_mode_nic_error(trans->op_mode);
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status))
|
||||
wake_up(&trans->fw_halt_waitq);
|
||||
|
||||
static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
|
||||
{
|
||||
if (trans->ops->sync_nmi)
|
||||
trans->ops->sync_nmi(trans);
|
||||
}
|
||||
|
||||
/*****************************************************
|
||||
|
|
|
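The trans-ops hunks above replace the STATUS_FW_WAIT_DUMP waitqueue with an optional sync_nmi callback that only the PCIe transport implements. A minimal userspace sketch of that optional-callback dispatch follows; the struct and function names are illustrative stand-ins, not the driver's real types.

#include <stdio.h>

struct trans;

struct trans_ops {
	/* optional: transports that can sync an NMI set this */
	void (*sync_nmi)(struct trans *trans);
};

struct trans {
	const struct trans_ops *ops;
	const char *name;
};

/* mirrors the iwl_trans_sync_nmi() inline: call only if implemented */
static void trans_sync_nmi(struct trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

static void pcie_sync_nmi(struct trans *trans)
{
	printf("%s: NMI fired and synced\n", trans->name);
}

int main(void)
{
	const struct trans_ops pcie_ops = { .sync_nmi = pcie_sync_nmi };
	const struct trans_ops dummy_ops = { 0 };
	struct trans pcie = { &pcie_ops, "pcie" };
	struct trans dummy = { &dummy_ops, "dummy" };

	trans_sync_nmi(&pcie);		/* dispatches to the transport */
	trans_sync_nmi(&dummy);		/* silently skipped */
	return 0;
}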
@@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,

 	iwl_mvm_mac_ctxt_remove(mvm, vif);

-	kfree(mvmvif->ap_wep_key);
-	mvmvif->ap_wep_key = NULL;
-
 	mutex_unlock(&mvm->mutex);
 }

@@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 		ret = iwl_mvm_update_sta(mvm, vif, sta);
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTHORIZED) {
-		/* if wep is used, need to set the key for the station now */
-		if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-			mvm_sta->wep_key =
-				kmemdup(mvmvif->ap_wep_key,
-					sizeof(*mvmvif->ap_wep_key) +
-					mvmvif->ap_wep_key->keylen,
-					GFP_KERNEL);
-			if (!mvm_sta->wep_key) {
-				ret = -ENOMEM;
-				goto out_unlock;
-			}
-
-			ret = iwl_mvm_set_sta_key(mvm, vif, sta,
-						  mvm_sta->wep_key,
-						  STA_KEY_IDX_INVALID);
-		} else {
-			ret = 0;
-		}
+		ret = 0;

 		/* we don't support TDLS during DCM */
 		if (iwl_mvm_phy_ctx_count(mvm) > 1)

@@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 					   NL80211_TDLS_DISABLE_LINK);
 		}

-		/* Remove STA key if this is an AP using WEP */
-		if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-			int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
-							    mvm_sta->wep_key);
-
-			if (!ret)
-				ret = rm_ret;
-			kfree(mvm_sta->wep_key);
-			mvm_sta->wep_key = NULL;
-		}
-
 		if (unlikely(ret &&
 			     test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
 				      &mvm->status)))

@@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
 			 struct ieee80211_sta *sta, u32 changed)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

 	if (changed & (IEEE80211_RC_BW_CHANGED |
 		       IEEE80211_RC_SUPP_RATES_CHANGED |
 		       IEEE80211_RC_NSS_CHANGED))
 		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
 				     true);
+
+	if (vif->type == NL80211_IFTYPE_STATION &&
+	    changed & IEEE80211_RC_NSS_CHANGED)

@@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 		break;
 	case WLAN_CIPHER_SUITE_WEP40:
 	case WLAN_CIPHER_SUITE_WEP104:
-		if (vif->type == NL80211_IFTYPE_AP) {
-			struct iwl_mvm_vif *mvmvif =
-				iwl_mvm_vif_from_mac80211(vif);
-
-			mvmvif->ap_wep_key = kmemdup(key,
-						     sizeof(*key) + key->keylen,
-						     GFP_KERNEL);
-			if (!mvmvif->ap_wep_key)
-				return -ENOMEM;
-		}
-
-		if (vif->type != NL80211_IFTYPE_STATION)
-			return 0;
-		break;
+		if (vif->type == NL80211_IFTYPE_STATION)
+			break;
+		if (iwl_mvm_has_new_tx_api(mvm))
+			return -EOPNOTSUPP;
+		/* support HW crypto on TX */
+		return 0;
 	default:
 		/* currently FW supports only one optional cipher scheme */
 		if (hw->n_cipher_schemes &&

@@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
 		if (ret) {
 			IWL_WARN(mvm, "set key failed\n");
+			key->hw_key_idx = STA_KEY_IDX_INVALID;
 			/*
 			 * can't add key for RX, but we don't need it
-			 * in the device for TX so still return 0
+			 * in the device for TX so still return 0,
+			 * unless we have new TX API where we cannot
+			 * put key material into the TX_CMD
 			 */
-			key->hw_key_idx = STA_KEY_IDX_INVALID;
-			ret = 0;
+			if (iwl_mvm_has_new_tx_api(mvm))
+				ret = -EOPNOTSUPP;
+			else
+				ret = 0;
 		}

 		break;

@@ -498,7 +498,6 @@ struct iwl_mvm_vif {
 	netdev_features_t features;

 	struct iwl_probe_resp_data __rcu *probe_resp_data;
-	struct ieee80211_key_conf *ap_wep_key;
 };

 static inline struct iwl_mvm_vif *

@@ -8,7 +8,7 @@
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as

@@ -31,7 +31,7 @@
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)

 		iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
 		list_del_init(&mvmtxq->list);
+		local_bh_disable();
 		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+		local_bh_enable();
 	}

 	mutex_unlock(&mvm->mutex);

@@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
 				   timeout);

-	if (mvmvif->ap_wep_key) {
-		u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
-		__set_bit(key_offset, mvm->fw_key_table);
-
-		if (key_offset == STA_KEY_IDX_INVALID)
-			return -ENOSPC;
-
-		ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-					   mvmvif->ap_wep_key, true, 0, NULL, 0,
-					   key_offset, 0);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }

@@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

 	iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);

-	if (mvmvif->ap_wep_key) {
-		int i;
-
-		if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
-					  mvm->fw_key_table)) {
-			IWL_ERR(mvm, "offset %d not used in fw key table.\n",
-				mvmvif->ap_wep_key->hw_key_idx);
-			return -ENOENT;
-		}
-
-		/* track which key was deleted last */
-		for (i = 0; i < STA_KEY_MAX_NUM; i++) {
-			if (mvm->fw_key_deleted[i] < U8_MAX)
-				mvm->fw_key_deleted[i]++;
-		}
-		mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
-		ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-					       mvmvif->ap_wep_key, true);
-		if (ret)
-			return ret;
-	}
-
 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
 	if (ret)
 		IWL_WARN(mvm, "Failed sending remove station\n");

@@ -8,7 +8,7 @@
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as

@@ -31,7 +31,7 @@
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data {
 *	the BA window. To be used for UAPSD only.
 * @ptk_pn: per-queue PTK PN data structures
 * @dup_data: per queue duplicate packet detection data
- * @wep_key: used in AP mode. Is a duplicate of the WEP key.
 * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
 * @tx_ant: the index of the antenna to use for data tx to this station. Only
 *	used during connection establishment (e.g. for the 4 way handshake

@@ -426,8 +425,6 @@ struct iwl_mvm_sta {
 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
 	struct iwl_mvm_rxq_dup_data *dup_data;

-	struct ieee80211_key_conf *wep_key;
-
 	u8 reserved_queue;

 	/* Temporary, until the new TLC will control the Tx protection */

@@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
 	{IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},

-	{IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
-	{IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
-	{IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
-	{IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+	{IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
+	{IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
+	{IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
+	{IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
 	{IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
 	{IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
-	{IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
-	{IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
+	{IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
+	{IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
+	{IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},

 	{IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
 	{IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},

@@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)

 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
 void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);

@@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 	.unref = iwl_trans_pcie_unref,			\
 	.dump_data = iwl_trans_pcie_dump_data,		\
 	.d3_suspend = iwl_trans_pcie_d3_suspend,	\
-	.d3_resume = iwl_trans_pcie_d3_resume
+	.d3_resume = iwl_trans_pcie_d3_resume,		\
+	.sync_nmi = iwl_trans_pcie_sync_nmi

 #ifdef CONFIG_PM_SLEEP
 #define IWL_TRANS_PM_OPS				\

@@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	} else if (cfg == &iwl_ax101_cfg_qu_hr) {
 		if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+		    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+		    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+		} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
 		    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
 			trans->cfg = &iwl_ax101_cfg_qu_hr;
 		} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==

@@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
 		   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-		   (trans->cfg != &iwl22260_2ax_cfg ||
+		   (trans->cfg != &iwl_ax200_cfg_cc ||
 		    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
 		u32 hw_status;

@@ -3637,7 +3642,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	return ERR_PTR(ret);
 }

-void iwl_trans_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
 {
 	unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;

@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
 			       cmd_str);
 		ret = -ETIMEDOUT;

-		iwl_trans_sync_nmi(trans);
+		iwl_trans_pcie_sync_nmi(trans);
 		goto cancel;
 	}

@@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 			       iwl_get_cmd_string(trans, cmd->id));
 		ret = -ETIMEDOUT;

-		iwl_trans_sync_nmi(trans);
+		iwl_trans_pcie_sync_nmi(trans);
 		goto cancel;
 	}
@@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 	enum nl80211_band band;
 	const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
 	struct net *net;
-	int idx;
+	int idx, i;
 	int n_limits = 0;

 	if (WARN_ON(param->channels > 1 && !param->use_chanctx))

@@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 		goto failed_hw;
 	}

+	data->if_combination.max_interfaces = 0;
+	for (i = 0; i < n_limits; i++)
+		data->if_combination.max_interfaces +=
+			data->if_limits[i].max;
+
 	data->if_combination.n_limits = n_limits;
-	data->if_combination.max_interfaces = 2048;
 	data->if_combination.limits = data->if_limits;

-	hw->wiphy->iface_combinations = &data->if_combination;
-	hw->wiphy->n_iface_combinations = 1;
+	/*
+	 * If we actually were asked to support combinations,
+	 * advertise them - if there's only a single thing like
+	 * only IBSS then don't advertise it as combinations.
+	 */
+	if (data->if_combination.max_interfaces > 1) {
+		hw->wiphy->iface_combinations = &data->if_combination;
+		hw->wiphy->n_iface_combinations = 1;
+	}

 	if (param->ciphers) {
 		memcpy(data->ciphers, param->ciphers,
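The hwsim change above derives max_interfaces by summing the per-type limits instead of hardcoding 2048, and only advertises combinations when more than one interface is actually possible. A standalone sketch of that accounting, with simplified stand-in structs:

#include <stdio.h>

struct if_limit {
	int max;	/* max interfaces of this type */
};

int main(void)
{
	/* e.g. one AP/STA slot plus one monitor slot */
	struct if_limit limits[] = { { .max = 1 }, { .max = 1 } };
	int n_limits = 2, max_interfaces = 0, i;

	/* sum the per-limit maxima instead of claiming a huge constant */
	for (i = 0; i < n_limits; i++)
		max_interfaces += limits[i].max;

	if (max_interfaces > 1)
		printf("advertise combinations (max %d)\n", max_interfaces);
	else
		printf("single interface only, no combinations\n");
	return 0;
}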
@@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
 	bus_ops->rmw = mt7603_rmw;
 	dev->mt76.bus = bus_ops;

+	spin_lock_init(&dev->ps_lock);
+
 	INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
 	tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
 		     (unsigned long)dev);

@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
 			 MT_BA_CONTROL_1_RESET));
 }

-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
 			    int ba_size)
 {
 	u32 addr = mt7603_wtbl2_addr(wcid);

@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
 		mt76_clear(dev, addr + (15 * 4), tid_mask);
 		return;
 	}
-	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
-
-	mt7603_mac_stop(dev);
-	switch (tid) {
-	case 0:
-		mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
-		break;
-	case 1:
-		mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
-		break;
-	case 2:
-		mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
-			       ssn);
-		mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
-			       ssn >> 8);
-		break;
-	case 3:
-		mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
-		break;
-	case 4:
-		mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
-		break;
-	case 5:
-		mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
-			       ssn);
-		mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
-			       ssn >> 4);
-		break;
-	case 6:
-		mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
-		break;
-	case 7:
-		mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
-		break;
-	}
-	mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
-	mt7603_mac_start(dev);

 	for (i = 7; i > 0; i--) {
 		if (ba_size >= MT_AGG_SIZE_LIMIT(i))

@@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_tx_rate *rate = &info->control.rates[0];
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
 	struct ieee80211_vif *vif = info->control.vif;
 	struct mt7603_vif *mvif;
 	int wlan_idx;

@@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
 	int tx_count = 8;
 	u8 frame_type, frame_subtype;
 	u16 fc = le16_to_cpu(hdr->frame_control);
+	u16 seqno = 0;
 	u8 vif_idx = 0;
 	u32 val;
 	u8 bw;

@@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
 		tx_count = 0x1f;

 	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
-	      FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
+	      MT_TXD3_SN_VALID;
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		seqno = le16_to_cpu(hdr->seq_ctrl);
+	else if (ieee80211_is_back_req(hdr->frame_control))
+		seqno = le16_to_cpu(bar->start_seq_num);
+	else
+		val &= ~MT_TXD3_SN_VALID;
+
+	val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
 	txwi[3] = cpu_to_le32(val);

 	if (key) {

@@ -372,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
 	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
 	struct sk_buff_head list;

-	mt76_stop_tx_queues(&dev->mt76, sta, false);
+	mt76_stop_tx_queues(&dev->mt76, sta, true);
 	mt7603_wtbl_set_ps(dev, msta, ps);
 	if (ps)
 		return;

@@ -584,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
 		mtxq->aggr = true;
 		mtxq->send_bar = false;
-		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
+		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
 		break;
 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
 		mtxq->aggr = false;
 		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
-		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
 		break;
 	case IEEE80211_AMPDU_TX_START:
 		mtxq->agg_ssn = *ssn << 4;

@@ -598,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		break;
 	case IEEE80211_AMPDU_TX_STOP_CONT:
 		mtxq->aggr = false;
-		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 	}

@@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
 int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
 void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
 void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
 			    int ba_size);

 void mt7603_pse_client_reset(struct mt7603_dev *dev);

@@ -466,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 		return;

 	rcu_read_lock();
-	mt76_tx_status_lock(mdev, &list);

 	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
 		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

@@ -479,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 			  drv_priv);
 	}

+	mt76_tx_status_lock(mdev, &list);
+
 	if (wcid) {
 		if (stat->pktid >= MT_PACKET_ID_FIRST)
 			status.skb = mt76_tx_status_skb_get(mdev, wcid,

@@ -498,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 		if (*update == 0 && stat_val == stat_cache &&
 		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
 			msta->n_frames++;
-			goto out;
+			mt76_tx_status_unlock(mdev, &list);
+			rcu_read_unlock();
+			return;
 		}

 		mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,

@@ -514,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,

 	if (status.skb)
 		mt76_tx_status_skb_done(mdev, status.skb, &list);
-	else
-		ieee80211_tx_status_ext(mt76_hw(dev), &status);
-
-out:
 	mt76_tx_status_unlock(mdev, &list);

+	if (!status.skb)
+		ieee80211_tx_status_ext(mt76_hw(dev), &status);
 	rcu_read_unlock();
 }
@@ -673,7 +673,6 @@ enum rt2x00_state_flags {
 	CONFIG_CHANNEL_HT40,
 	CONFIG_POWERSAVING,
 	CONFIG_HT_DISABLED,
-	CONFIG_QOS_DISABLED,
 	CONFIG_MONITORING,

 	/*

@@ -642,18 +642,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
 			rt2x00dev->intf_associated--;

 		rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
-		clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
 	}

-	/*
-	 * Check for access point which do not support 802.11e . We have to
-	 * generate data frames sequence number in S/W for such AP, because
-	 * of H/W bug.
-	 */
-	if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
-		set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
 	/*
 	 * When the erp information has changed, we should perform
 	 * additional configuration steps. For all other changes we are done.

@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
 	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
 		/*
 		 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
-		 * seqno on retransmited data (non-QOS) frames. To workaround
-		 * the problem let's generate seqno in software if QOS is
-		 * disabled.
+		 * seqno on retransmitted data (non-QOS) and management frames.
+		 * To workaround the problem let's generate seqno in software.
+		 * Except for beacons which are transmitted periodically by H/W
+		 * hence hardware has to assign seqno for them.
 		 */
-		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
-			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
-		else
+		if (ieee80211_is_beacon(hdr->frame_control)) {
 			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+			/* H/W will generate sequence number */
+			return;
+		}
+
+		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 	}

 	/*
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
 			u64 start, u64 size, u64 end,
 			u64 userspace_addr, int perm)
 {
-	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+	struct vhost_umem_node *tmp, *node;

+	if (!size)
+		return -EFAULT;
+
+	node = kmalloc(sizeof(*node), GFP_ATOMIC);
 	if (!node)
 		return -ENOMEM;
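The vhost check above rejects a zero-sized IOTLB range before allocation; with an inclusive end of start + size - 1, size == 0 would wrap and corrupt the interval tree. A hedged userspace sketch of the same guard (the error values only mimic the kernel's):

#include <stdio.h>
#include <stdint.h>

/* returns 0 on success, negative errno-style value on failure */
static int new_umem_range(uint64_t start, uint64_t size)
{
	if (!size)
		return -14;	/* mimics -EFAULT: inclusive end would wrap */

	uint64_t last = start + size - 1;	/* inclusive end, as an interval tree expects */
	printf("range [%llu, %llu] accepted\n",
	       (unsigned long long)start, (unsigned long long)last);
	return 0;
}

int main(void)
{
	printf("size 0    -> %d\n", new_umem_range(4096, 0));	/* rejected */
	printf("size 4096 -> %d\n", new_umem_range(4096, 4096));	/* ok */
	return 0;
}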
@@ -610,6 +610,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
 	bool stalled = false;
 	u64 rtt;
 	u32 life, last_life;
+	bool rxrpc_complete = false;

 	DECLARE_WAITQUEUE(myself, current);

@@ -621,7 +622,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
 		rtt2 = 2;

 	timeout = rtt2;
-	last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+	rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);

 	add_wait_queue(&call->waitq, &myself);
 	for (;;) {

@@ -639,7 +640,12 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
 		if (afs_check_call_state(call, AFS_CALL_COMPLETE))
 			break;

-		life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+		if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
+			/* rxrpc terminated the call. */
+			rxrpc_complete = true;
+			break;
+		}
+
 		if (timeout == 0 &&
 		    life == last_life && signal_pending(current)) {
 			if (stalled)

@@ -663,12 +669,16 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
 	remove_wait_queue(&call->waitq, &myself);
 	__set_current_state(TASK_RUNNING);

-	/* Kill off the call if it's still live. */
 	if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
-		_debug("call interrupted");
-		if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-					    RX_USER_ABORT, -EINTR, "KWI"))
-			afs_set_call_complete(call, -EINTR, 0);
+		if (rxrpc_complete) {
+			afs_set_call_complete(call, call->error, call->abort_code);
+		} else {
+			/* Kill off the call if it's still live. */
+			_debug("call interrupted");
+			if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+						    RX_USER_ABORT, -EINTR, "KWI"))
+				afs_set_call_complete(call, -EINTR, 0);
+		}
 	}

 	spin_lock_bh(&call->state_lock);
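The rxrpc_kernel_check_life() rework above turns the return value into an is-the-call-still-alive flag and passes the life counter out through a pointer, which is what lets afs set rxrpc_complete. A sketch of the caller-side pattern, with a stub standing in for the real socket call:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

/* stand-in for rxrpc_kernel_check_life(sock, call, &life):
 * returns false once rxrpc has terminated the call.
 */
static bool check_life(int tick, uint32_t *life)
{
	*life = tick / 2;	/* pretend incoming ACKs bump this */
	return tick < 5;	/* pretend rxrpc kills the call at tick 5 */
}

int main(void)
{
	uint32_t life, last_life;
	bool completed_by_rxrpc = false;

	check_life(0, &last_life);
	for (int tick = 1; tick < 10; tick++) {
		if (!check_life(tick, &life)) {
			/* rxrpc terminated the call: take its error/abort code */
			completed_by_rxrpc = true;
			break;
		}
		if (life != last_life) {
			printf("tick %d: still alive (life %u)\n", tick, life);
			last_life = life;
		}
	}
	printf(completed_by_rxrpc ? "call ended by rxrpc\n"
				  : "caller gave up\n");
	return 0;
}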
@@ -1500,6 +1500,7 @@ struct net_device_ops {
 * @IFF_FAILOVER: device is a failover master device
 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
 */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,

@@ -1532,6 +1533,7 @@ enum netdev_priv_flags {
 	IFF_FAILOVER			= 1<<27,
 	IFF_FAILOVER_SLAVE		= 1<<28,
 	IFF_L3MDEV_RX_HANDLER		= 1<<29,
+	IFF_LIVE_RENAME_OK		= 1<<30,
 };

 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN

@@ -1563,6 +1565,7 @@ enum netdev_priv_flags {
 #define IFF_FAILOVER			IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
 #define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
+#define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK

 /**
 * struct net_device - The DEVICE structure.

@@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
 			       rxrpc_user_attach_call_t, unsigned long, gfp_t,
 			       unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
+			     u32 *);
 void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
 				 ktime_t *);
 bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);

 #endif /* _NET_RXRPC_H */

@@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
 #define wiphy_info(wiphy, format, args...)			\
 	dev_info(&(wiphy)->dev, format, ##args)

+#define wiphy_err_ratelimited(wiphy, format, args...)		\
+	dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...)		\
+	dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
 #define wiphy_debug(wiphy, format, args...)			\
 	wiphy_printk(KERN_DEBUG, wiphy, format, ##args)

@@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 * @hw: pointer as obtained from ieee80211_alloc_hw()
 * @ac: AC number to return packets from.
 *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
 * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
 * is returned, it should be returned with ieee80211_return_txq() after the
 * driver has finished scheduling it.

@@ -6240,38 +6238,23 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);

 /**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
 *
 * @hw: pointer as obtained from ieee80211_alloc_hw()
 * @ac: AC number to acquire locks for
 *
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
 */
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-	__acquires(txq_lock);
-
-/**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
- *
- * Release locks previously acquired by ieee80211_txq_schedule_end().
- */
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-	__releases(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq, bool force);

 /**
 * ieee80211_schedule_txq - schedule a TXQ for transmission

@@ -6279,12 +6262,34 @@ void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
 * @hw: pointer as obtained from ieee80211_alloc_hw()
 * @txq: pointer obtained from station or virtual interface
 *
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
+ *
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
 */
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-	__acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+	__ieee80211_schedule_txq(hw, txq, true);
+}
+
+/**
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
+ *
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
+ */
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+		     bool force)
+{
+	__ieee80211_schedule_txq(hw, txq, force);
+}

 /**
 * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit

@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
 int nr_t1timer_running(struct sock *);

 /* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
 void nr_unregister_sysctl(void);

 #endif

@@ -2084,12 +2084,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
 * @p: poll_table
 *
 * See the comments in the wq_has_sleeper function.
- *
- * Do not derive sock from filp->private_data here. An SMC socket establishes
- * an internal TCP socket that is used in the fallback case. All socket
- * operations on the SMC socket are then forwarded to the TCP socket. In case of
- * poll, the filp->private_data pointer references the SMC socket because the
- * TCP socket has no file assigned.
 */
 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
 				  poll_table *p)
@@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
 			int offset, size_t size, int flags);
 void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 int tls_tx_records(struct sock *sk, int flags);

@@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
 		int flags);
 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 			    int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {

@@ -379,7 +381,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_VALIDATE_XMIT
-	return sk_fullsock(sk) &
+	return sk_fullsock(sk) &&
 	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
 		&tls_validate_xmit_skb);
 #else
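The one-character tls.h fix above swaps bitwise & for logical &&. With & the right-hand side is always evaluated and truth values are combined bit by bit, so a truthy-but-not-1 left operand can silently yield false. A small demonstration:

#include <stdio.h>

static int count;

static int truthy(void)
{
	return 2;	/* non-zero, i.e. "true", but not 1 */
}

static int side_effect(void)
{
	count++;
	return 1;
}

int main(void)
{
	/* 2 & 1 == 0: bitwise AND on truth values can drop a true result */
	printf("2 & 1  -> %d\n", truthy() & side_effect());
	/* 2 && 1 == 1: logical AND only asks "non-zero?" */
	printf("2 && 1 -> %d\n", truthy() && side_effect());

	count = 0;
	(void)(0 & side_effect());	/* right-hand side still runs */
	printf("evaluations under &:  %d\n", count);
	count = 0;
	(void)(0 && side_effect());	/* short-circuited, skipped */
	printf("evaluations under &&: %d\n", count);
	return 0;
}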
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)

 static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
 {
-	if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+	if (arg < 0 || arg >= MAX_LEC_ITF)
+		return -EINVAL;
+	arg = array_index_nospec(arg, MAX_LEC_ITF);
+	if (!dev_lec[arg])
 		return -EINVAL;
 	vcc->proto_data = dev_lec[arg];
 	return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);

@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
 	i = arg;
 	if (arg >= MAX_LEC_ITF)
 		return -EINVAL;
+	i = array_index_nospec(arg, MAX_LEC_ITF);
 	if (!dev_lec[i]) {
 		int size;
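Both lec hunks follow the usual Spectre-v1 hardening pattern: bounds-check the user-controlled index first, then clamp it with array_index_nospec() before the table lookup, so a mis-speculated path cannot read out of bounds. A userspace approximation is below; the branch-free mask clamp only mimics the kernel helper, which is architecture-tuned:

#include <stdio.h>
#include <stddef.h>

/* rough userspace stand-in for array_index_nospec(index, size):
 * yields index when 0 <= index < size, else 0, without a branch.
 */
static size_t index_nospec(size_t index, size_t size)
{
	/* all-ones mask when index < size, zero otherwise */
	size_t mask = (size_t)0 - (index < size);
	return index & mask;
}

int main(void)
{
	const char *dev[4] = { "lec0", "lec1", "lec2", "lec3" };
	size_t arg = 2;

	if (arg >= 4)
		return 1;		/* architectural bounds check */
	arg = index_nospec(arg, 4);	/* clamp for the speculative path */
	printf("attach to %s\n", dev[arg]);
	return 0;
}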
@@ -523,12 +523,12 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
 	struct sock *sk = sock->sk;
 	int err = 0;

-	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
-
 	if (!addr || addr_len < sizeof(struct sockaddr_sco) ||
 	    addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;

+	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+
 	lock_sock(sk);

 	if (sk->sk_state != BT_OPEN) {

@@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
 /* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
 	__br_handle_local_finish(skb);

-	BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
-	br_pass_frame_up(skb);
-	return 0;
+	/* return 1 to signal the okfn() was called so it's ok to use the skb */
+	return 1;
 }

 /*

@@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 			goto forward;
 		}

-		/* Deliver packet to local host only */
-		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
-			NULL, skb, skb->dev, NULL, br_handle_local_finish);
-		return RX_HANDLER_CONSUMED;
+		/* The else clause should be hit when nf_hook():
+		 *   - returns < 0 (drop/error)
+		 *   - returns = 0 (stolen/nf_queue)
+		 * Thus return 1 from the okfn() to signal the skb is ok to pass
+		 */
+		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+			    dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+			    br_handle_local_finish) == 1) {
+			return RX_HANDLER_PASS;
+		} else {
+			return RX_HANDLER_CONSUMED;
+		}
 	}

 forward:
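The bridge fix relies on a contract between NF_HOOK() and its okfn: when the hook verdict lets the packet through, NF_HOOK() returns whatever the okfn returned, so returning 1 from br_handle_local_finish() lets br_handle_frame() distinguish "okfn ran, skb still usable" from "netfilter dropped or stole it". A toy model of that contract; the types and names are illustrative, not the netfilter API:

#include <stdio.h>

enum verdict { DROP, STOLEN, ACCEPT };

/* the okfn: returns 1 to say "I ran, the skb is fine to keep using" */
static int handle_local_finish(int *skb)
{
	*skb += 1;	/* pretend to finish local delivery */
	return 1;
}

/* toy NF_HOOK: only calls the okfn if the hook accepts the packet */
static int nf_hook(enum verdict v, int *skb, int (*okfn)(int *))
{
	if (v == ACCEPT)
		return okfn(skb);
	return v == DROP ? -1 : 0;	/* dropped vs stolen/queued */
}

int main(void)
{
	int skb = 0;

	if (nf_hook(ACCEPT, &skb, handle_local_finish) == 1)
		printf("okfn ran: pass skb up the stack (skb=%d)\n", skb);

	if (nf_hook(STOLEN, &skb, handle_local_finish) != 1)
		printf("netfilter kept the skb: treat as consumed\n");
	return 0;
}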
@@ -2031,7 +2031,8 @@ static void br_multicast_start_querier(struct net_bridge *br,

 	__br_multicast_open(br, query);

-	list_for_each_entry(port, &br->port_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &br->port_list, list) {
 		if (port->state == BR_STATE_DISABLED ||
 		    port->state == BR_STATE_BLOCKING)
 			continue;

@@ -2043,6 +2044,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
 			br_multicast_enable(&port->ip6_own_query);
 #endif
 	}
+	rcu_read_unlock();
 }

 int br_multicast_toggle(struct net_bridge *br, unsigned long val)

@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
 	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
 		       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
 	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
-		       br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT)))
+		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
 		return -EMSGSIZE;
 #endif
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING

@@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
 	BUG_ON(!dev_net(dev));

 	net = dev_net(dev);
-	if (dev->flags & IFF_UP)
+
+	/* Some auto-enslaved devices e.g. failover slaves are
+	 * special, as userspace might rename the device after
+	 * the interface had been brought up and running since
+	 * the point kernel initiated auto-enslavement. Allow
+	 * live name change even when these slave devices are
+	 * up and running.
+	 *
+	 * Typically, users of these auto-enslaving devices
+	 * don't actually care about slave name change, as
+	 * they are supposed to operate on master interface
+	 * directly.
+	 */
+	if (dev->flags & IFF_UP &&
+	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
 		return -EBUSY;

 	write_seqcount_begin(&devnet_rename_seq);

@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
 		goto err_upper_link;
 	}

-	slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
+	slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);

 	if (fops && fops->slave_register &&
 	    !fops->slave_register(slave_dev, failover_dev))
 		return NOTIFY_OK;

 	netdev_upper_dev_unlink(slave_dev, failover_dev);
-	slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+	slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 err_upper_link:
 	netdev_rx_handler_unregister(slave_dev);
 done:

@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)

 	netdev_rx_handler_unregister(slave_dev);
 	netdev_upper_dev_unlink(slave_dev, failover_dev);
-	slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+	slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);

 	if (fops && fops->slave_unregister &&
 	    !fops->slave_unregister(slave_dev, failover_dev))

@@ -4383,6 +4383,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
 	 * Only binding to IP is supported.
 	 */
 	err = -EINVAL;
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		return err;
 	if (addr->sa_family == AF_INET) {
 		if (addr_len < sizeof(struct sockaddr_in))
 			return err;

@@ -1747,20 +1747,16 @@ int netdev_register_kobject(struct net_device *ndev)

 	error = device_add(dev);
 	if (error)
-		goto error_put_device;
+		return error;

 	error = register_queue_kobjects(ndev);
-	if (error)
-		goto error_device_del;
+	if (error) {
+		device_del(dev);
+		return error;
+	}

 	pm_runtime_set_memalloc_noio(dev, true);

 	return 0;
-
-error_device_del:
-	device_del(dev);
-error_put_device:
-	put_device(dev);
-	return error;
 }

@@ -185,9 +185,10 @@ void __init ptp_classifier_init(void)
 		{ 0x16,  0,  0, 0x00000000 },
 		{ 0x06,  0,  0, 0x00000000 },
 	};
-	struct sock_fprog_kern ptp_prog = {
-		.len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
-	};
+	struct sock_fprog_kern ptp_prog;
+
+	ptp_prog.len = ARRAY_SIZE(ptp_filter);
+	ptp_prog.filter = ptp_filter;

 	BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }

@@ -4948,7 +4948,7 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
 {
 	struct if_stats_msg *ifsm;

-	if (nlh->nlmsg_len < sizeof(*ifsm)) {
+	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
 		NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
 		return -EINVAL;
 	}

@@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);

 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
-	int mac_len;
+	int mac_len, meta_len;
+	void *meta;

 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
 		kfree_skb(skb);

@@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
 			mac_len - VLAN_HLEN - ETH_TLEN);
 	}
+
+	meta_len = skb_metadata_len(skb);
+	if (meta_len) {
+		meta = skb_metadata_end(skb) - meta_len;
+		memmove(meta + VLAN_HLEN, meta, meta_len);
+	}
+
 	skb->mac_header += VLAN_HLEN;
 	return skb;
 }

@@ -348,7 +348,7 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
 		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
 	}

-	if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
 		*(struct old_timeval32 *)optval = tv32;
 		return sizeof(tv32);

@@ -372,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool
 {
 	struct __kernel_sock_timeval tv;

-	if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 		struct old_timeval32 tv32;

 		if (optlen < sizeof(tv32))

@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 	struct guehdr *guehdr;
 	void *data;
 	u16 doffset = 0;
+	u8 proto_ctype;

 	if (!fou)
 		return 1;

@@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 	if (unlikely(guehdr->control))
 		return gue_control_message(skb, guehdr);

+	proto_ctype = guehdr->proto_ctype;
 	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
 	skb_reset_transport_header(skb);

 	if (iptunnel_pull_offloads(skb))
 		goto drop;

-	return -guehdr->proto_ctype;
+	return -proto_ctype;

 drop:
 	kfree_skb(skb);

@@ -1185,9 +1185,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)

 static void ipv4_link_failure(struct sk_buff *skb)
 {
+	struct ip_options opt;
 	struct rtable *rt;
+	int res;

-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+	/* Recompile ip options since IPCB may not be valid anymore.
+	 */
+	memset(&opt, 0, sizeof(opt));
+	opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+
+	rcu_read_lock();
+	res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+	rcu_read_unlock();
+
+	if (res)
+		return;
+
+	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);

 	rt = skb_rtable(skb);
 	if (rt)
@@ -49,9 +49,8 @@
 #define DCTCP_MAX_ALPHA	1024U

 struct dctcp {
-	u32 acked_bytes_ecn;
-	u32 acked_bytes_total;
-	u32 prior_snd_una;
+	u32 old_delivered;
+	u32 old_delivered_ce;
 	u32 prior_rcv_nxt;
 	u32 dctcp_alpha;
 	u32 next_seq;

@@ -73,8 +72,8 @@ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
 {
 	ca->next_seq = tp->snd_nxt;

-	ca->acked_bytes_ecn = 0;
-	ca->acked_bytes_total = 0;
+	ca->old_delivered = tp->delivered;
+	ca->old_delivered_ce = tp->delivered_ce;
 }

 static void dctcp_init(struct sock *sk)

@@ -86,7 +85,6 @@ static void dctcp_init(struct sock *sk)
 		     sk->sk_state == TCP_CLOSE)) {
 		struct dctcp *ca = inet_csk_ca(sk);

-		ca->prior_snd_una = tp->snd_una;
 		ca->prior_rcv_nxt = tp->rcv_nxt;

 		ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);

@@ -118,37 +116,25 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct dctcp *ca = inet_csk_ca(sk);
-	u32 acked_bytes = tp->snd_una - ca->prior_snd_una;
-
-	/* If ack did not advance snd_una, count dupack as MSS size.
-	 * If ack did update window, do not count it at all.
-	 */
-	if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE))
-		acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;
-	if (acked_bytes) {
-		ca->acked_bytes_total += acked_bytes;
-		ca->prior_snd_una = tp->snd_una;
-
-		if (flags & CA_ACK_ECE)
-			ca->acked_bytes_ecn += acked_bytes;
-	}

 	/* Expired RTT */
 	if (!before(tp->snd_una, ca->next_seq)) {
-		u64 bytes_ecn = ca->acked_bytes_ecn;
+		u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
 		u32 alpha = ca->dctcp_alpha;

 		/* alpha = (1 - g) * alpha + g * F */

 		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
-		if (bytes_ecn) {
-			/* If dctcp_shift_g == 1, a 32bit value would overflow
-			 * after 8 Mbytes.
-			 */
-			bytes_ecn <<= (10 - dctcp_shift_g);
-			do_div(bytes_ecn, max(1U, ca->acked_bytes_total));
+		if (delivered_ce) {
+			u32 delivered = tp->delivered - ca->old_delivered;

-			alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
+			/* If dctcp_shift_g == 1, a 32bit value would overflow
+			 * after 8 M packets.
+			 */
+			delivered_ce <<= (10 - dctcp_shift_g);
+			delivered_ce /= max(1U, delivered);
+
+			alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
 		}
 		/* dctcp_alpha can be read from dctcp_get_info() without
 		 * synchro, so we ask compiler to not use dctcp_alpha

@@ -200,6 +186,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 			     union tcp_cc_info *info)
 {
 	const struct dctcp *ca = inet_csk_ca(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);

 	/* Fill it also in case of VEGASINFO due to req struct limits.
 	 * We can still correctly retrieve it later.

@@ -211,8 +198,10 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 		info->dctcp.dctcp_enabled = 1;
 		info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
 		info->dctcp.dctcp_alpha = ca->dctcp_alpha;
-		info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
-		info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
+		info->dctcp.dctcp_ab_ecn = tp->mss_cache *
+					   (tp->delivered_ce - ca->old_delivered_ce);
+		info->dctcp.dctcp_ab_tot = tp->mss_cache *
+					   (tp->delivered - ca->old_delivered);
 	}

 	*attr = INET_DIAG_DCTCPINFO;
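The dctcp rework keeps the same EWMA, alpha = (1 - g) * alpha + g * F, but computes the congestion fraction F from delivered packet counts rather than acked byte counts. A standalone sketch of the fixed-point arithmetic, with g = 1/2^shift_g and alpha scaled to 1024 as in DCTCP_MAX_ALPHA; the numbers in main() are only illustrative:

#include <stdio.h>
#include <stdint.h>

#define MAX_ALPHA 1024U	/* alpha is scaled by 2^10 */

/* one per-RTT update: g = 1/2^shift_g, F = delivered_ce / delivered */
static uint32_t update_alpha(uint32_t alpha, uint32_t delivered,
			     uint32_t delivered_ce, unsigned int shift_g)
{
	uint32_t dec = alpha >> shift_g;

	/* like min_not_zero(): when alpha >> g truncates to 0, snap to 0 */
	alpha -= dec ? dec : alpha;

	if (delivered_ce) {
		/* F scaled by 2^10, then weighted by g via the shift */
		uint32_t f = (delivered_ce << (10 - shift_g)) /
			     (delivered ? delivered : 1);
		alpha += f;
		if (alpha > MAX_ALPHA)
			alpha = MAX_ALPHA;
	}
	return alpha;
}

int main(void)
{
	uint32_t alpha = 0;

	/* 30% of packets CE-marked each RTT: alpha approaches ~0.3 * 1024 */
	for (int rtt = 0; rtt < 40; rtt++)
		alpha = update_alpha(alpha, 100, 30, 4);
	printf("alpha after 40 RTTs: %u / 1024\n", alpha);
	return 0;
}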
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	int room;
+
+	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;

 	/* Check #1 */
-	if (tp->rcv_ssthresh < tp->window_clamp &&
-	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !tcp_under_memory_pressure(sk)) {
+	if (room > 0 && !tcp_under_memory_pressure(sk)) {
 		int incr;

 		/* Check #2. Increase window, if skb with such overhead

@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)

 		if (incr) {
 			incr = max_t(int, incr, 2 * skb->len);
-			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
-					       tp->window_clamp);
+			tp->rcv_ssthresh += min(room, incr);
 			inet_csk(sk)->icsk_ack.quick |= 1;
 		}
 	}
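The rewritten tcp_grow_window() folds the two old bounds into one signed quantity, room = min(window_clamp, tcp_space()) - rcv_ssthresh, and then grows rcv_ssthresh by at most room so it can never overshoot either limit. A hedged arithmetic sketch with made-up numbers:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

/* returns the new rcv_ssthresh after one grow step */
static int grow(int rcv_ssthresh, int window_clamp, int space, int incr)
{
	int room = min_int(window_clamp, space) - rcv_ssthresh;

	if (room > 0)				/* both bounds still above us */
		rcv_ssthresh += min_int(room, incr);
	return rcv_ssthresh;
}

int main(void)
{
	/* space (65000) is the binding limit here, not window_clamp */
	int thresh = 64000;

	thresh = grow(thresh, 100000, 65000, 4000);
	printf("after grow: %d (capped by space, not clamp)\n", thresh);

	thresh = grow(thresh, 100000, 65000, 4000);	/* room == 0: no growth */
	printf("after second grow: %d\n", thresh);
	return 0;
}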
@ -2330,6 +2330,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
|
|||
|
||||
rcu_read_lock();
|
||||
from = rcu_dereference(rt6->from);
|
||||
if (!from) {
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
|
||||
if (nrt6) {
|
||||
rt6_do_update_pmtu(nrt6, mtu);
|
||||
|
|
|
@ -1047,6 +1047,8 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
|
|||
static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
|
||||
int addr_len)
|
||||
{
|
||||
if (addr_len < offsetofend(struct sockaddr, sa_family))
|
||||
return -EINVAL;
|
||||
/* The following checks are replicated from __ip6_datagram_connect()
|
||||
* and intended to prevent BPF program called below from accessing
|
||||
* bytes that are out of the bound specified by user in addr_len.
|
||||
|
|
|
@ -320,14 +320,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
|
|||
struct llc_sap *sap;
|
||||
int rc = -EINVAL;
|
||||
|
||||
dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
|
||||
|
||||
lock_sock(sk);
|
||||
if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
|
||||
goto out;
|
||||
rc = -EAFNOSUPPORT;
|
||||
if (unlikely(addr->sllc_family != AF_LLC))
|
||||
goto out;
|
||||
dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
|
||||
rc = -ENODEV;
|
||||
rcu_read_lock();
|
||||
if (sk->sk_bound_dev_if) {
|
||||
|
|
|
@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
|
|||
{
|
||||
struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
|
||||
|
||||
if (local->in_reconfig)
|
||||
return;
|
||||
|
||||
if (!check_sdata_in_driver(sdata))
|
||||
return;
|
||||
|
||||
|
|
|
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 		 * The driver doesn't know anything about VLAN interfaces.
 		 * Hence, don't send GTKs for VLAN interfaces to the driver.
 		 */
-		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+			ret = 1;
 			goto out_unsupported;
+		}
 	}

 	ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 		/* all of these we can do in software - if driver can */
 		if (ret == 1)
 			return 0;
-		if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
-			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-				return 0;
+		if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
 			return -EINVAL;
-		}
 		return 0;
 	default:
 		return -EINVAL;
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
 static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
 {
 	/* Use last four bytes of hw addr as hash index */
-	return jhash_1word(*(u32 *)(addr+2), seed);
+	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
 }

 static const struct rhashtable_params mesh_rht_params = {
@@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta)
 		return;

 	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
-		if (txq_has_queue(sta->sta.txq[tid]))
+		struct ieee80211_txq *txq = sta->sta.txq[tid];
+		struct txq_info *txqi = to_txq_info(txq);
+
+		spin_lock(&local->active_txq_lock[txq->ac]);
+		if (!list_empty(&txqi->schedule_order))
+			list_del_init(&txqi->schedule_order);
+		spin_unlock(&local->active_txq_lock[txq->ac]);
+
+		if (txq_has_queue(txq))
 			set_bit(tid, &sta->txq_buffered_tids);
 		else
 			clear_bit(tid, &sta->txq_buffered_tids);
@@ -1,4 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING

 #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
 #define __MAC80211_MSG_DRIVER_TRACE
@@ -11,7 +16,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg

-#define MAX_MSG_LEN 100
+#define MAX_MSG_LEN 120

 DECLARE_EVENT_CLASS(mac80211_msg_event,
 	TP_PROTO(struct va_format *vaf),
@@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	u8 max_subframes = sta->sta.max_amsdu_subframes;
 	int max_frags = local->hw.max_tx_fragments;
 	int max_amsdu_len = sta->sta.max_amsdu_len;
+	int orig_truesize;
 	__be16 len;
 	void *data;
 	bool ret = false;
@@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	if (!head || skb_is_gso(head))
 		goto out;

+	orig_truesize = head->truesize;
 	orig_len = head->len;

 	if (skb->len + head->len > max_amsdu_len)
@@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	*frag_tail = skb;

 out_recalc:
+	fq->memory_usage += head->truesize - orig_truesize;
 	if (head->len != orig_len) {
 		flow->backlog += head->len - orig_len;
 		tin->backlog_bytes += head->len - orig_len;
@@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_txq *ret = NULL;
 	struct txq_info *txqi = NULL;

-	lockdep_assert_held(&local->active_txq_lock[ac]);
+	spin_lock_bh(&local->active_txq_lock[ac]);

 begin:
 	txqi = list_first_entry_or_null(&local->active_txqs[ac],
 					struct txq_info,
 					schedule_order);
 	if (!txqi)
-		return NULL;
+		goto out;

 	if (txqi->txq.sta) {
 		struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)


 	if (txqi->schedule_round == local->schedule_round[ac])
-		return NULL;
+		goto out;

 	list_del_init(&txqi->schedule_order);
 	txqi->schedule_round = local->schedule_round[ac];
-	return &txqi->txq;
+	ret = &txqi->txq;
+
+out:
+	spin_unlock_bh(&local->active_txq_lock[ac]);
+	return ret;
 }
 EXPORT_SYMBOL(ieee80211_next_txq);

-void ieee80211_return_txq(struct ieee80211_hw *hw,
-			  struct ieee80211_txq *txq)
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq,
+			      bool force)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct txq_info *txqi = to_txq_info(txq);

-	lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+	spin_lock_bh(&local->active_txq_lock[txq->ac]);

 	if (list_empty(&txqi->schedule_order) &&
-	    (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+	    (force || !skb_queue_empty(&txqi->frags) ||
+	     txqi->tin.backlog_packets)) {
 		/* If airtime accounting is active, always enqueue STAs at the
 		 * head of the list to ensure that they only get moved to the
 		 * back by the airtime DRR scheduler once they have a negative
@@ -3706,20 +3716,10 @@ void ieee80211_return_txq(struct ieee80211_hw *hw,
 			list_add_tail(&txqi->schedule_order,
 				      &local->active_txqs[txq->ac]);
 	}
+
+	spin_unlock_bh(&local->active_txq_lock[txq->ac]);
 }
-EXPORT_SYMBOL(ieee80211_return_txq);
-
-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
-			    struct ieee80211_txq *txq)
-	__acquires(txq_lock) __releases(txq_lock)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-
-	spin_lock_bh(&local->active_txq_lock[txq->ac]);
-	ieee80211_return_txq(hw, txq);
-	spin_unlock_bh(&local->active_txq_lock[txq->ac]);
-}
-EXPORT_SYMBOL(ieee80211_schedule_txq);
+EXPORT_SYMBOL(__ieee80211_schedule_txq);

 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 				struct ieee80211_txq *txq)
@@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 	struct sta_info *sta;
 	u8 ac = txq->ac;

-	lockdep_assert_held(&local->active_txq_lock[ac]);
+	spin_lock_bh(&local->active_txq_lock[ac]);

 	if (!txqi->txq.sta)
 		goto out;
@@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,

 	sta->airtime[ac].deficit += sta->airtime_weight;
 	list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+	spin_unlock_bh(&local->active_txq_lock[ac]);

 	return false;
+
 out:
 	if (!list_empty(&txqi->schedule_order))
 		list_del_init(&txqi->schedule_order);
+	spin_unlock_bh(&local->active_txq_lock[ac]);

 	return true;
 }
 EXPORT_SYMBOL(ieee80211_txq_may_transmit);

 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-	__acquires(txq_lock)
 {
 	struct ieee80211_local *local = hw_to_local(hw);

 	spin_lock_bh(&local->active_txq_lock[ac]);
 	local->schedule_round[ac]++;
 }
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-	__releases(txq_lock)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-
-	spin_unlock_bh(&local->active_txq_lock[ac]);
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);

 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 				  struct net_device *dev,
@@ -988,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 	struct netlink_sock *nlk = nlk_sk(sk);
 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
 	int err = 0;
-	unsigned long groups = nladdr->nl_groups;
+	unsigned long groups;
 	bool bound;

 	if (addr_len < sizeof(struct sockaddr_nl))
@@ -996,6 +996,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,

 	if (nladdr->nl_family != AF_NETLINK)
 		return -EINVAL;
+	groups = nladdr->nl_groups;

 	/* Only superuser is allowed to listen multicasts */
 	if (groups) {
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
 	int i;
 	int rc = proto_register(&nr_proto, 0);

-	if (rc != 0)
-		goto out;
+	if (rc)
+		return rc;

 	if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
-		printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
-		return -1;
+		pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+		       __func__);
+		rc = -EINVAL;
+		goto unregister_proto;
 	}

 	dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
-	if (dev_nr == NULL) {
-		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
-		return -1;
+	if (!dev_nr) {
+		pr_err("NET/ROM: %s - unable to allocate device array\n",
+		       __func__);
+		rc = -ENOMEM;
+		goto unregister_proto;
 	}

 	for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
 		sprintf(name, "nr%d", i);
 		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
 		if (!dev) {
-			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
+			rc = -ENOMEM;
 			goto fail;
 		}

 		dev->base_addr = i;
-		if (register_netdev(dev)) {
-			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
+		rc = register_netdev(dev);
+		if (rc) {
 			free_netdev(dev);
 			goto fail;
 		}
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
 		dev_nr[i] = dev;
 	}

-	if (sock_register(&nr_family_ops)) {
-		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
+	rc = sock_register(&nr_family_ops);
+	if (rc)
 		goto fail;
-	}

-	register_netdevice_notifier(&nr_dev_notifier);
+	rc = register_netdevice_notifier(&nr_dev_notifier);
+	if (rc)
+		goto out_sock;

 	ax25_register_pid(&nr_pid);
 	ax25_linkfail_register(&nr_linkfail_notifier);

 #ifdef CONFIG_SYSCTL
-	nr_register_sysctl();
+	rc = nr_register_sysctl();
+	if (rc)
+		goto out_sysctl;
 #endif

 	nr_loopback_init();

-	proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
-	proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
-	proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
-	return rc;
+	rc = -ENOMEM;
+	if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+		goto proc_remove1;
+	if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+			     &nr_neigh_seqops))
+		goto proc_remove2;
+	if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+			     &nr_node_seqops))
+		goto proc_remove3;
+
+	return 0;
+
+proc_remove3:
+	remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+	remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+	nr_loopback_clear();
+	nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+	nr_unregister_sysctl();
+out_sysctl:
+#endif
+	ax25_linkfail_release(&nr_linkfail_notifier);
+	ax25_protocol_release(AX25_P_NETROM);
+	unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+	sock_unregister(PF_NETROM);
 fail:
 	while (--i >= 0) {
 		unregister_netdev(dev_nr[i]);
 		free_netdev(dev_nr[i]);
 	}
 	kfree(dev_nr);
+unregister_proto:
 	proto_unregister(&nr_proto);
-	rc = -1;
-	goto out;
+	return rc;
 }

 module_init(nr_proto_init);
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
 	}
 }

-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
 {
 	del_timer_sync(&loopback_timer);
 	skb_queue_purge(&loopback_queue);
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
 /*
  *	Free all memory associated with the nodes and routes lists.
  */
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
 {
 	struct nr_neigh *s = NULL;
 	struct nr_node *t = NULL;
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
 	{ }
 };

-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
 {
 	nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+	if (!nr_table_header)
+		return -ENOMEM;
+	return 0;
 }

 void nr_unregister_sysctl(void)
@@ -543,6 +543,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
 	struct rds_sock *rs = rds_sk_to_rs(sk);
 	int ret = 0;

+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		return -EINVAL;
+
 	lock_sock(sk);

 	switch (uaddr->sa_family) {
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	/* We allow an RDS socket to be bound to either IPv4 or IPv6
 	 * address.
 	 */
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		return -EINVAL;
 	if (uaddr->sa_family == AF_INET) {
 		struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;

Some files were not shown because too many files have changed in this diff.