Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (42 commits)
  ipv4: dont create routes on down devices
  epic100: hamachi: yellowfin: Fix skb allocation size
  sundance: Fix oopses with corrupted skb_shared_info
  Revert "ipv4: Allow configuring subnets as local addresses"
  USB: mcs7830: return negative if auto negotiate fails
  irda: prevent integer underflow in IRLMP_ENUMDEVICES
  tcp: fix listening_get_next()
  atl1c: Do not use legacy PCI power management
  mac80211: fix mesh forwarding
  MAINTAINERS: email address change
  net: Fix range checks in tcf_valid_offset().
  net_sched: sch_sfq: fix allot handling
  hostap: remove netif_stop_queue from init
  mac80211/rt2x00: add ieee80211_tx_status_ni()
  typhoon: memory corruption in typhoon_get_drvinfo()
  net: Add USB PID for new MOSCHIP USB ethernet controller MCS7832 variant
  net_sched: always clone skbs
  ipv6: Fragment locally generated tunnel-mode IPSec6 packets as needed.
  netlink: fix gcc -Wconversion compilation warning
  asix: add USB ID for Logitec LAN-GTJ U2A
  ...
commit d7c1255a3a
@@ -4590,7 +4590,7 @@ F: drivers/pcmcia/
 F: include/pcmcia/

 PCNET32 NETWORK DRIVER
- M: Don Fry <pcnet32@verizon.net>
+ M: Don Fry <pcnet32@frontier.com>
 L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/pcnet32.c
@@ -311,8 +311,10 @@ static void hci_uart_tty_close(struct tty_struct *tty)

 if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
 hu->proto->close(hu);
- hci_unregister_dev(hdev);
- hci_free_dev(hdev);
+ if (hdev) {
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+ }
 }
 }
 }
@@ -702,6 +702,7 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)


 adapter->wol = 0;
+ device_set_wakeup_enable(&pdev->dev, false);
 adapter->link_speed = SPEED_0;
 adapter->link_duplex = FULL_DUPLEX;
 adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;

@@ -2444,8 +2445,9 @@ static int atl1c_close(struct net_device *netdev)
 return 0;
 }

- static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
+ static int atl1c_suspend(struct device *dev)
 {
+ struct pci_dev *pdev = to_pci_dev(dev);
 struct net_device *netdev = pci_get_drvdata(pdev);
 struct atl1c_adapter *adapter = netdev_priv(netdev);
 struct atl1c_hw *hw = &adapter->hw;

@@ -2454,7 +2456,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
 u32 wol_ctrl_data = 0;
 u16 mii_intr_status_data = 0;
 u32 wufc = adapter->wol;
- int retval = 0;

 atl1c_disable_l0s_l1(hw);
 if (netif_running(netdev)) {

@@ -2462,9 +2463,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
 atl1c_down(adapter);
 }
 netif_device_detach(netdev);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;

 if (wufc)
 if (atl1c_phy_power_saving(hw) != 0)

@@ -2525,12 +2523,8 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);

- /* pcie patch */
- device_set_wakeup_enable(&pdev->dev, 1);
-
 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
 GPHY_CTRL_EXT_RESET);
- pci_prepare_to_sleep(pdev);
 } else {
 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
 master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;

@@ -2540,25 +2534,17 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
 hw->phy_configured = false; /* re-init PHY when resume */
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
 }

- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
 return 0;
 }

- static int atl1c_resume(struct pci_dev *pdev)
+ static int atl1c_resume(struct device *dev)
 {
+ struct pci_dev *pdev = to_pci_dev(dev);
 struct net_device *netdev = pci_get_drvdata(pdev);
 struct atl1c_adapter *adapter = netdev_priv(netdev);

- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
 ATL1C_PCIE_PHY_RESET);

@@ -2582,7 +2568,12 @@ static int atl1c_resume(struct pci_dev *pdev)

 static void atl1c_shutdown(struct pci_dev *pdev)
 {
- atl1c_suspend(pdev, PMSG_SUSPEND);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ atl1c_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, adapter->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
 }

 static const struct net_device_ops atl1c_netdev_ops = {

@@ -2886,16 +2877,16 @@ static struct pci_error_handlers atl1c_err_handler = {
 .resume = atl1c_io_resume,
 };

+ static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
+
 static struct pci_driver atl1c_driver = {
 .name = atl1c_driver_name,
 .id_table = atl1c_pci_tbl,
 .probe = atl1c_probe,
 .remove = __devexit_p(atl1c_remove),
- /* Power Managment Hooks */
- .suspend = atl1c_suspend,
- .resume = atl1c_resume,
 .shutdown = atl1c_shutdown,
- .err_handler = &atl1c_err_handler
+ .err_handler = &atl1c_err_handler,
+ .driver.pm = &atl1c_pm_ops,
 };

 /*
@@ -234,7 +234,7 @@ struct be_adapter {
 u8 __iomem *db; /* Door Bell */
 u8 __iomem *pcicfg; /* PCI config space */

- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
 struct be_dma_mem mbox_mem;
 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
 * is stored for freeing purpose */
@@ -462,7 +462,8 @@ int be_cmd_fw_init(struct be_adapter *adapter)
 u8 *wrb;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = (u8 *)wrb_from_mbox(adapter);
 *wrb++ = 0xFF;

@@ -476,7 +477,7 @@ int be_cmd_fw_init(struct be_adapter *adapter)

 status = be_mbox_notify_wait(adapter);

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
 return status;
 }

@@ -491,7 +492,8 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
 if (adapter->eeh_err)
 return -EIO;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = (u8 *)wrb_from_mbox(adapter);
 *wrb++ = 0xFF;

@@ -505,7 +507,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)

 status = be_mbox_notify_wait(adapter);

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
 return status;
 }
 int be_cmd_eq_create(struct be_adapter *adapter,

@@ -516,7 +518,8 @@ int be_cmd_eq_create(struct be_adapter *adapter,
 struct be_dma_mem *q_mem = &eq->dma_mem;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -546,7 +549,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
 eq->created = true;
 }

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
 return status;
 }

@@ -558,7 +561,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 struct be_cmd_req_mac_query *req;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -583,7 +587,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
 }

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
 return status;
 }

@@ -667,7 +671,8 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 void *ctxt;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -701,7 +706,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 cq->created = true;
 }

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);

 return status;
 }

@@ -724,7 +729,8 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
 void *ctxt;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -754,7 +760,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
 mccq->id = le16_to_cpu(resp->id);
 mccq->created = true;
 }
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);

 return status;
 }

@@ -769,7 +775,8 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 void *ctxt;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -801,7 +808,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 txq->created = true;
 }

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);

 return status;
 }

@@ -816,7 +823,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
 struct be_dma_mem *q_mem = &rxq->dma_mem;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -843,7 +851,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
 *rss_id = resp->rss_id;
 }

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);

 return status;
 }

@@ -862,7 +870,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 if (adapter->eeh_err)
 return -EIO;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -899,7 +908,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,

 status = be_mbox_notify_wait(adapter);

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);

 return status;
 }

@@ -915,7 +924,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 struct be_cmd_req_if_create *req;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -941,7 +951,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 *pmac_id = le32_to_cpu(resp->pmac_id);
 }

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
 return status;
 }

@@ -955,7 +965,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
 if (adapter->eeh_err)
 return -EIO;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -970,7 +981,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)

 status = be_mbox_notify_wait(adapter);

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);

 return status;
 }

@@ -1060,7 +1071,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
 struct be_cmd_req_get_fw_version *req;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -1077,7 +1089,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
 }

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
 return status;
 }

@@ -1322,7 +1334,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 struct be_cmd_req_query_fw_cfg *req;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -1341,7 +1354,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 *caps = le32_to_cpu(resp->function_caps);
 }

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
 return status;
 }

@@ -1352,7 +1365,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
 struct be_cmd_req_hdr *req;
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -1365,7 +1379,7 @@ int be_cmd_reset_function(struct be_adapter *adapter)

 status = be_mbox_notify_wait(adapter);

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
 return status;
 }

@@ -1376,7 +1390,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
 u32 myhash[10];
 int status;

- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;

 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);

@@ -1396,7 +1411,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)

 status = be_mbox_notify_wait(adapter);

- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
 return status;
 }
@@ -2677,7 +2677,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
 }
 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

- spin_lock_init(&adapter->mbox_lock);
+ mutex_init(&adapter->mbox_lock);
 spin_lock_init(&adapter->mcc_lock);
 spin_lock_init(&adapter->mcc_cq_lock);
@@ -88,7 +88,12 @@ static void bond_na_send(struct net_device *slave_dev,
 }

 if (vlan_id) {
- skb = vlan_put_tag(skb, vlan_id);
+ /* The Ethernet header is not present yet, so it is
+ * too early to insert a VLAN tag. Force use of an
+ * out-of-line tag here and let dev_hard_start_xmit()
+ * insert it if the slave hardware can't.
+ */
+ skb = __vlan_hwaccel_put_tag(skb, vlan_id);
 if (!skb) {
 pr_err("failed to insert VLAN tag\n");
 return;
@@ -418,36 +418,11 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
- *
- * When the bond gets an skb to transmit that is
- * already hardware accelerated VLAN tagged, and it
- * needs to relay this skb to a slave that is not
- * hw accel capable, the skb needs to be "unaccelerated",
- * i.e. strip the hwaccel tag and re-insert it as part
- * of the payload.
 */
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 struct net_device *slave_dev)
 {
- unsigned short uninitialized_var(vlan_id);
-
- /* Test vlan_list not vlgrp to catch and handle 802.1p tags */
- if (!list_empty(&bond->vlan_list) &&
- !(slave_dev->features & NETIF_F_HW_VLAN_TX) &&
- vlan_get_tag(skb, &vlan_id) == 0) {
- skb->dev = slave_dev;
- skb = vlan_put_tag(skb, vlan_id);
- if (!skb) {
- /* vlan_put_tag() frees the skb in case of error,
- * so return success here so the calling functions
- * won't attempt to free is again.
- */
- return 0;
- }
- } else {
- skb->dev = slave_dev;
- }
-
+ skb->dev = slave_dev;
 skb->priority = 1;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {

@@ -1203,11 +1178,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 bond_do_fail_over_mac(bond, new_active,
 old_active);

- bond->send_grat_arp = bond->params.num_grat_arp;
- bond_send_gratuitous_arp(bond);
+ if (netif_running(bond->dev)) {
+ bond->send_grat_arp = bond->params.num_grat_arp;
+ bond_send_gratuitous_arp(bond);

- bond->send_unsol_na = bond->params.num_unsol_na;
- bond_send_unsolicited_na(bond);
+ bond->send_unsol_na = bond->params.num_unsol_na;
+ bond_send_unsolicited_na(bond);
+ }

 write_unlock_bh(&bond->curr_slave_lock);
 read_unlock(&bond->lock);

@@ -1221,8 +1198,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)

 /* resend IGMP joins since active slave has changed or
 * all were sent on curr_active_slave */
- if ((USES_PRIMARY(bond->params.mode) && new_active) ||
- bond->params.mode == BOND_MODE_ROUNDROBIN) {
+ if (((USES_PRIMARY(bond->params.mode) && new_active) ||
+ bond->params.mode == BOND_MODE_ROUNDROBIN) &&
+ netif_running(bond->dev)) {
 bond->igmp_retrans = bond->params.resend_igmp;
 queue_delayed_work(bond->wq, &bond->mcast_work, 0);
 }
@@ -269,11 +269,11 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n

 bond_for_each_slave(bond, slave, i) {
 if (slave->dev == slave_dev) {
- break;
+ return slave;
 }
 }

- return slave;
+ return 0;
 }

 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -935,7 +935,7 @@ static void epic_init_ring(struct net_device *dev)

 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
 for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
 ep->rx_skbuff[i] = skb;
 if (skb == NULL)
 break;

@@ -1233,7 +1233,7 @@ static int epic_rx(struct net_device *dev, int budget)
 entry = ep->dirty_rx % RX_RING_SIZE;
 if (ep->rx_skbuff[entry] == NULL) {
 struct sk_buff *skb;
- skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
+ skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
 if (skb == NULL)
 break;
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
@@ -1202,7 +1202,7 @@ static void hamachi_init_ring(struct net_device *dev)
 }
 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
 for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
 hmp->rx_skbuff[i] = skb;
 if (skb == NULL)
 break;

@@ -1669,7 +1669,7 @@ static int hamachi_rx(struct net_device *dev)
 entry = hmp->dirty_rx % RX_RING_SIZE;
 desc = &(hmp->rx_ring[entry]);
 if (hmp->rx_skbuff[entry] == NULL) {
- struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);

 hmp->rx_skbuff[entry] = skb;
 if (skb == NULL)
@@ -690,6 +690,7 @@ static void block_output(struct net_device *dev, int count,
 static struct pcmcia_device_id axnet_ids[] = {
 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081),
 PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301),
+ PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301),
 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303),
 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
@@ -1493,7 +1493,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
 PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530),
 PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab),
 PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110),
- PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041),
 PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452),
 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300),
@@ -1016,7 +1016,7 @@ static void init_ring(struct net_device *dev)

 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
 for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
 np->rx_skbuff[i] = skb;
 if (skb == NULL)
 break;

@@ -1407,7 +1407,7 @@ static void refill_rx (struct net_device *dev)
 struct sk_buff *skb;
 entry = np->dirty_rx % RX_RING_SIZE;
 if (np->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(np->rx_buf_sz);
+ skb = dev_alloc_skb(np->rx_buf_sz + 2);
 np->rx_skbuff[entry] = skb;
 if (skb == NULL)
 break; /* Better luck next round. */
@@ -324,7 +324,7 @@ static int bdx_fw_load(struct bdx_priv *priv)
 ENTER;
 master = READ_REG(priv, regINIT_SEMAPHORE);
 if (!READ_REG(priv, regINIT_STATUS) && master) {
- rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev);
+ rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
 if (rc)
 goto out;
 bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);

@@ -2510,4 +2510,4 @@ module_exit(bdx_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(BDX_DRV_DESC);
- MODULE_FIRMWARE("tehuti/firmware.bin");
+ MODULE_FIRMWARE("tehuti/bdx.bin");
@@ -1004,7 +1004,6 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 }

 strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, UTS_RELEASE);
 strcpy(info->bus_info, pci_name(pci_dev));
 }
@@ -1507,6 +1507,10 @@ static const struct usb_device_id products [] = {
 // ASIX AX88178 10/100/1000
 USB_DEVICE (0x0b95, 0x1780),
 .driver_info = (unsigned long) &ax88178_info,
 }, {
+ // Logitec LAN-GTJ/U2A
+ USB_DEVICE (0x0789, 0x0160),
+ .driver_info = (unsigned long) &ax88178_info,
+ }, {
 // Linksys USB200M Rev 2
 USB_DEVICE (0x13b1, 0x0018),
@@ -1,5 +1,5 @@
 /*
- * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
+ * MOSCHIP MCS7830 based (7730/7830/7832) USB 2.0 Ethernet Devices
 *
 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver
 *

@@ -11,6 +11,9 @@
 *
 * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
 *
+ * 2010-12-19: add 7832 USB PID ("functionality same as MCS7830"),
+ * per active notification by manufacturer
+ *
 * TODO:
 * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
 * - implement ethtool_ops get_pauseparam/set_pauseparam

@@ -60,6 +63,7 @@
 #define MCS7830_MAX_MCAST 64

 #define MCS7830_VENDOR_ID 0x9710
+ #define MCS7832_PRODUCT_ID 0x7832
 #define MCS7830_PRODUCT_ID 0x7830
 #define MCS7730_PRODUCT_ID 0x7730

@@ -351,7 +355,7 @@ static int mcs7830_set_autoneg(struct usbnet *dev, int ptrUserPhyMode)
 if (!ret)
 ret = mcs7830_write_phy(dev, MII_BMCR,
 BMCR_ANENABLE | BMCR_ANRESTART );
- return ret < 0 ? : 0;
+ return ret;
 }


@@ -626,7 +630,7 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 }

 static const struct driver_info moschip_info = {
- .description = "MOSCHIP 7830/7730 usb-NET adapter",
+ .description = "MOSCHIP 7830/7832/7730 usb-NET adapter",
 .bind = mcs7830_bind,
 .rx_fixup = mcs7830_rx_fixup,
 .flags = FLAG_ETHER,

@@ -644,6 +648,10 @@ static const struct driver_info sitecom_info = {
 };

 static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE(MCS7830_VENDOR_ID, MCS7832_PRODUCT_ID),
+ .driver_info = (unsigned long) &moschip_info,
+ },
 {
 USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID),
 .driver_info = (unsigned long) &moschip_info,
@@ -166,7 +166,9 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 if (!(rcv->flags & IFF_UP))
 goto tx_drop;

- if (dev->features & NETIF_F_NO_CSUM)
+ /* don't change ip_summed == CHECKSUM_PARTIAL, as that
+ will cause bad checksum on forwarded packets */
+ if (skb->ip_summed == CHECKSUM_NONE)
 skb->ip_summed = rcv_priv->ip_summed;

 length = skb->len + ETH_HLEN;
@@ -891,7 +891,6 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,

 SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops);

- netif_stop_queue(dev);
 }

 static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked)
@@ -315,6 +315,7 @@ struct iwl_cfg iwl100_bgn_cfg = {
 .mod_params = &iwlagn_mod_params,
 .base_params = &iwl1000_base_params,
 .ht_params = &iwl1000_ht_params,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl100_bg_cfg = {

@@ -330,6 +331,7 @@ struct iwl_cfg iwl100_bg_cfg = {
 .ops = &iwl1000_ops,
 .mod_params = &iwlagn_mod_params,
 .base_params = &iwl1000_base_params,
+ .use_new_eeprom_reading = true,
 };

 MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
@@ -561,6 +561,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
 .ht_params = &iwl6000_ht_params,
 .need_dc_calib = true,
 .need_temp_offset_calib = true,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2a_2abg_cfg = {

@@ -578,6 +579,7 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = {
 .base_params = &iwl6000_base_params,
 .need_dc_calib = true,
 .need_temp_offset_calib = true,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2a_2bg_cfg = {

@@ -595,6 +597,7 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = {
 .base_params = &iwl6000_base_params,
 .need_dc_calib = true,
 .need_temp_offset_calib = true,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_2agn_cfg = {

@@ -616,6 +619,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
 .need_temp_offset_calib = true,
 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_2abg_cfg = {

@@ -636,6 +640,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
 .need_temp_offset_calib = true,
 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_2bgn_cfg = {

@@ -657,6 +662,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
 .need_temp_offset_calib = true,
 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_2bg_cfg = {

@@ -677,6 +683,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
 .need_temp_offset_calib = true,
 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_bgn_cfg = {

@@ -698,6 +705,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
 .need_temp_offset_calib = true,
 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6000g2b_bg_cfg = {

@@ -718,6 +726,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
 .need_temp_offset_calib = true,
 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .use_new_eeprom_reading = true,
 };

 /*

@@ -804,6 +813,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
 .base_params = &iwl6050_base_params,
 .ht_params = &iwl6000_ht_params,
 .need_dc_calib = true,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl6050_2abg_cfg = {

@@ -857,6 +867,7 @@ struct iwl_cfg iwl130_bgn_cfg = {
 .need_dc_calib = true,
 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .use_new_eeprom_reading = true,
 };

 struct iwl_cfg iwl130_bg_cfg = {

@@ -876,6 +887,7 @@ struct iwl_cfg iwl130_bg_cfg = {
 .need_dc_calib = true,
 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .use_new_eeprom_reading = true,
 };

 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
@@ -392,7 +392,7 @@ static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
 /**
 * iwlcore_eeprom_enhanced_txpower: process enhanced tx power info
 */
- void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
+ static void iwlcore_eeprom_enhanced_txpower_old(struct iwl_priv *priv)
 {
 int eeprom_section_count = 0;
 int section, element;

@@ -419,7 +419,8 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
 * always check for valid entry before process
 * the information
 */
- if (!enhanced_txpower->common || enhanced_txpower->reserved)
+ if (!(enhanced_txpower->flags || enhanced_txpower->channel) ||
+ enhanced_txpower->delta_20_in_40)
 continue;

 for (element = 0; element < eeprom_section_count; element++) {

@@ -452,3 +453,86 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
 }
 }
 }
+
+ static void
+ iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
+ struct iwl_eeprom_enhanced_txpwr *txp,
+ s8 max_txpower_avg)
+ {
+ int ch_idx;
+ bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
+ enum ieee80211_band band;
+
+ band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+
+ for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
+ struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
+
+ /* update matching channel or from common data only */
+ if (txp->channel != 0 && ch_info->channel != txp->channel)
+ continue;
+
+ /* update matching band only */
+ if (band != ch_info->band)
+ continue;
+
+ if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
+ ch_info->max_power_avg = max_txpower_avg;
+ ch_info->curr_txpow = max_txpower_avg;
+ ch_info->scan_power = max_txpower_avg;
+ }
+
+ if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
+ ch_info->ht40_max_power_avg = max_txpower_avg;
+ }
+ }
+
+ #define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
+ #define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
+ #define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
+
+ static void iwlcore_eeprom_enhanced_txpower_new(struct iwl_priv *priv)
+ {
+ struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+ int idx, entries;
+ __le16 *txp_len;
+ s8 max_txp_avg, max_txp_avg_halfdbm;
+
+ BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
+
+ /* the length is in 16-bit words, but we want entries */
+ txp_len = (__le16 *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+ entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
+
+ txp_array = (void *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+ for (idx = 0; idx < entries; idx++) {
+ txp = &txp_array[idx];
+
+ /* skip invalid entries */
+ if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
+ continue;
+
+ max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+ &max_txp_avg_halfdbm);
+
+ /*
+ * Update the user limit values values to the highest
+ * power supported by any channel
+ */
+ if (max_txp_avg > priv->tx_power_user_lmt)
+ priv->tx_power_user_lmt = max_txp_avg;
+ if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
+ priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
+
+ iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
+ }
+ }
+
+ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
+ {
+ if (priv->cfg->use_new_eeprom_reading)
+ iwlcore_eeprom_enhanced_txpower_new(priv);
+ else
+ iwlcore_eeprom_enhanced_txpower_old(priv);
+ }
@@ -569,6 +569,12 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
 case INDIRECT_REGULATORY:
 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
 break;
+ case INDIRECT_TXP_LIMIT:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+ break;
+ case INDIRECT_TXP_LIMIT_SIZE:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+ break;
 case INDIRECT_CALIBRATION:
 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
 break;
@@ -390,6 +390,7 @@ struct iwl_cfg {
 const bool need_temp_offset_calib; /* if used set to true */
 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
 u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+ const bool use_new_eeprom_reading; /* temporary, remove later */
 };

 /***************************
@@ -120,6 +120,17 @@ struct iwl_eeprom_channel {
 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
 } __packed;

+ enum iwl_eeprom_enhanced_txpwr_flags {
+ IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
+ IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
+ IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
+ IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
+ IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
+ IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
+ IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
+ IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
+ };
+
 /**
 * iwl_eeprom_enhanced_txpwr structure
 * This structure presents the enhanced regulatory tx power limit layout

@@ -127,21 +138,23 @@ struct iwl_eeprom_channel {
 * Enhanced regulatory tx power portion of eeprom image can be broken down
 * into individual structures; each one is 8 bytes in size and contain the
 * following information
- * @common: (desc + channel) not used by driver, should _NOT_ be "zero"
+ * @flags: entry flags
+ * @channel: channel number
 * @chain_a_max_pwr: chain a max power in 1/2 dBm
 * @chain_b_max_pwr: chain b max power in 1/2 dBm
 * @chain_c_max_pwr: chain c max power in 1/2 dBm
- * @reserved: not used, should be "zero"
+ * @delta_20_in_40: 20-in-40 deltas (hi/lo)
 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
 *
 */
 struct iwl_eeprom_enhanced_txpwr {
- __le16 common;
+ u8 flags;
+ u8 channel;
 s8 chain_a_max;
 s8 chain_b_max;
 s8 chain_c_max;
- s8 reserved;
+ u8 delta_20_in_40;
 s8 mimo2_max;
 s8 mimo3_max;
 } __packed;

@@ -186,6 +199,8 @@ struct iwl_eeprom_enhanced_txpwr {
 #define EEPROM_LINK_CALIBRATION (2*0x67)
 #define EEPROM_LINK_PROCESS_ADJST (2*0x68)
 #define EEPROM_LINK_OTHERS (2*0x69)
+ #define EEPROM_LINK_TXP_LIMIT (2*0x6a)
+ #define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)

 /* agn regulatory - indirect access */
 #define EEPROM_REG_BAND_1_CHANNELS ((0x08)\

@@ -389,6 +404,8 @@ struct iwl_eeprom_calib_info {
 #define INDIRECT_CALIBRATION 0x00040000
 #define INDIRECT_PROCESS_ADJST 0x00050000
 #define INDIRECT_OTHERS 0x00060000
+ #define INDIRECT_TXP_LIMIT 0x00070000
+ #define INDIRECT_TXP_LIMIT_SIZE 0x00080000
 #define INDIRECT_ADDRESS 0x00100000

 /* General */
@@ -619,7 +619,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
 print_ssid(ssid_buf, ssid, ssid_len),
 LBS_SCAN_RSSI_TO_MBM(rssi)/100);

- if (channel ||
+ if (channel &&
 !(channel->flags & IEEE80211_CHAN_DISABLED))
 cfg80211_inform_bss(wiphy, channel,
 bssid, le64_to_cpu(*(__le64 *)tsfdesc),
@@ -43,6 +43,7 @@ MODULE_FIRMWARE("isl3887usb");

 static struct usb_device_id p54u_table[] __devinitdata = {
 /* Version 1 devices (pci chip + net2280) */
+ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
 {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
 {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */

@@ -56,9 +57,13 @@ static struct usb_device_id p54u_table[] __devinitdata = {
 {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
 {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
 {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */
+ {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
 {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
 {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
+ {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */
+ {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */
+ {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
 {USB_DEVICE(0x182d, 0x096b)}, /* Sitecom WL-107 */
 {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */
 {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */
 {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */

@@ -94,6 +99,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
+ {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
 {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
 {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
@@ -912,6 +912,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
 __set_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags);
 if (!modparam_nohwcrypt)
 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
@@ -664,6 +664,7 @@ enum rt2x00_flags {
 DRIVER_REQUIRE_COPY_IV,
 DRIVER_REQUIRE_L2PAD,
 DRIVER_REQUIRE_TXSTATUS_FIFO,
+ DRIVER_REQUIRE_TASKLET_CONTEXT,

 /*
 * Driver features
@@ -390,9 +390,12 @@ void rt2x00lib_txdone(struct queue_entry *entry,
 * through a mac80211 library call (RTS/CTS) then we should not
 * send the status report back.
 */
- if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
- ieee80211_tx_status(rt2x00dev->hw, entry->skb);
- else
+ if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
+ if (test_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags))
+ ieee80211_tx_status(rt2x00dev->hw, entry->skb);
+ else
+ ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
+ } else
 dev_kfree_skb_any(entry->skb);

 /*
@@ -744,7 +744,7 @@ static int yellowfin_init_ring(struct net_device *dev)
 }

 for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
 yp->rx_skbuff[i] = skb;
 if (skb == NULL)
 break;

@@ -1157,7 +1157,7 @@ static int yellowfin_rx(struct net_device *dev)
 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
 entry = yp->dirty_rx % RX_RING_SIZE;
 if (yp->rx_skbuff[entry] == NULL) {
- struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
 if (skb == NULL)
 break; /* Better luck next round. */
 yp->rx_skbuff[entry] = skb;
@@ -70,7 +70,7 @@ struct nlmsghdr {
 Check NLM_F_EXCL
 */

- #define NLMSG_ALIGNTO 4
+ #define NLMSG_ALIGNTO 4U
 #define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
 #define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
 #define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN))
@@ -49,7 +49,6 @@ struct flowi {
 __u8 proto;
 __u8 flags;
 #define FLOWI_FLAG_ANYSRC 0x01
- #define FLOWI_FLAG_MATCH_ANY_IIF 0x02
 union {
 struct {
 __be16 sport;
@@ -164,5 +164,15 @@ static inline int ipv6_unicast_destination(struct sk_buff *skb)
 return rt->rt6i_flags & RTF_LOCAL;
 }

+ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+
+ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+ {
+ struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+
+ return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
+ skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+ }
+
 #endif
 #endif
@@ -2024,8 +2024,8 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
 *
 * This function may not be called in IRQ context. Calls to this function
 * for a single hardware must be synchronized against each other. Calls
- * to this function and ieee80211_tx_status_irqsafe() may not be mixed
- * for a single hardware.
+ * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe()
+ * may not be mixed for a single hardware.
 *
 * @hw: the hardware the frame was transmitted by
 * @skb: the frame that was transmitted, owned by mac80211 after this call

@@ -2033,14 +2033,34 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
 void ieee80211_tx_status(struct ieee80211_hw *hw,
 struct sk_buff *skb);

+ /**
+ * ieee80211_tx_status_ni - transmit status callback (in process context)
+ *
+ * Like ieee80211_tx_status() but can be called in process context.
+ *
+ * Calls to this function, ieee80211_tx_status() and
+ * ieee80211_tx_status_irqsafe() may not be mixed
+ * for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @skb: the frame that was transmitted, owned by mac80211 after this call
+ */
+ static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+ {
+ local_bh_disable();
+ ieee80211_tx_status(hw, skb);
+ local_bh_enable();
+ }
+
 /**
 * ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback
 *
 * Like ieee80211_tx_status() but can be called in IRQ context
 * (internally defers to a tasklet.)
 *
- * Calls to this function and ieee80211_tx_status() may not be mixed for a
- * single hardware.
+ * Calls to this function, ieee80211_tx_status() and
+ * ieee80211_tx_status_ni() may not be mixed for a single hardware.
 *
 * @hw: the hardware the frame was transmitted by
 * @skb: the frame that was transmitted, owned by mac80211 after this call
@@ -323,7 +323,9 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
 static inline int tcf_valid_offset(const struct sk_buff *skb,
 const unsigned char *ptr, const int len)
 {
- return unlikely((ptr + len) < skb_tail_pointer(skb) && ptr > skb->head);
+ return likely((ptr + len) <= skb_tail_pointer(skb) &&
+ ptr >= skb->head &&
+ (ptr <= (ptr + len)));
 }

 #ifdef CONFIG_NET_CLS_IND
@@ -610,11 +610,7 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
 {
 struct sk_buff *n;

- if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) &&
- !skb_shared(skb))
- n = skb_get(skb);
- else
- n = skb_clone(skb, gfp_mask);
+ n = skb_clone(skb, gfp_mask);

 if (n) {
 n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
@@ -754,6 +754,7 @@ struct proto {
 void (*unhash)(struct sock *sk);
 void (*rehash)(struct sock *sk);
 int (*get_port)(struct sock *sk, unsigned short snum);
+ void (*clear_sk)(struct sock *sk, int size);

 /* Keeping track of sockets in use */
 #ifdef CONFIG_PROC_FS

@@ -852,6 +853,8 @@ static inline void __sk_prot_rehash(struct sock *sk)
 sk->sk_prot->hash(sk);
 }

+ void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
+
 /* About 10 seconds */
 #define SOCK_DESTROY_TIME (10*HZ)
@@ -311,6 +311,7 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d)
 d->state = BT_OPEN;
 d->flags = 0;
 d->mscex = 0;
+ d->sec_level = BT_SECURITY_LOW;
 d->mtu = RFCOMM_DEFAULT_MTU;
 d->v24_sig = RFCOMM_V24_RTC | RFCOMM_V24_RTR | RFCOMM_V24_DV;
@@ -437,7 +437,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 ip6h = ipv6_hdr(skb);

 *(__force __be32 *)ip6h = htonl(0x60000000);
- ip6h->payload_len = 8 + sizeof(*mldq);
+ ip6h->payload_len = htons(8 + sizeof(*mldq));
 ip6h->nexthdr = IPPROTO_HOPOPTS;
 ip6h->hop_limit = 1;
 ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
@@ -181,8 +181,7 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
 {
 int ret = 0;

- if (rule->iifindex && (rule->iifindex != fl->iif) &&
- !(fl->flags & FLOWI_FLAG_MATCH_ANY_IIF))
+ if (rule->iifindex && (rule->iifindex != fl->iif))
 goto out;

 if (rule->oifindex && (rule->oifindex != fl->oif))
@@ -1009,6 +1009,36 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
 #endif
 }

+ /*
+ * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+ * un-modified. Special care is taken when initializing object to zero.
+ */
+ static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+ {
+ if (offsetof(struct sock, sk_node.next) != 0)
+ memset(sk, 0, offsetof(struct sock, sk_node.next));
+ memset(&sk->sk_node.pprev, 0,
+ size - offsetof(struct sock, sk_node.pprev));
+ }
+
+ void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
+ {
+ unsigned long nulls1, nulls2;
+
+ nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
+ nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
+ if (nulls1 > nulls2)
+ swap(nulls1, nulls2);
+
+ if (nulls1 != 0)
+ memset((char *)sk, 0, nulls1);
+ memset((char *)sk + nulls1 + sizeof(void *), 0,
+ nulls2 - nulls1 - sizeof(void *));
+ memset((char *)sk + nulls2 + sizeof(void *), 0,
+ size - nulls2 - sizeof(void *));
+ }
+ EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
+
 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 int family)
 {

@@ -1021,19 +1051,12 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 if (!sk)
 return sk;
 if (priority & __GFP_ZERO) {
- /*
- * caches using SLAB_DESTROY_BY_RCU should let
- * sk_node.next un-modified. Special care is taken
- * when initializing object to zero.
- */
- if (offsetof(struct sock, sk_node.next) != 0)
- memset(sk, 0, offsetof(struct sock, sk_node.next));
- memset(&sk->sk_node.pprev, 0,
- prot->obj_size - offsetof(struct sock,
- sk_node.pprev));
+ if (prot->clear_sk)
+ prot->clear_sk(sk, prot->obj_size);
+ else
+ sk_prot_clear_nulls(sk, prot->obj_size);
 }
- }
- else
+ } else
 sk = kmalloc(prot->obj_size, priority);

 if (sk != NULL) {
@@ -163,13 +163,19 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 .daddr = addr
 }
 },
- .flags = FLOWI_FLAG_MATCH_ANY_IIF
 };
 struct fib_result res = { 0 };
 struct net_device *dev = NULL;
+ struct fib_table *local_table;
+
+ #ifdef CONFIG_IP_MULTIPLE_TABLES
+ res.r = NULL;
+ #endif

 rcu_read_lock();
- if (fib_lookup(net, &fl, &res)) {
+ local_table = fib_get_table(net, RT_TABLE_LOCAL);
+ if (!local_table ||
+ fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
 rcu_read_unlock();
 return NULL;
 }
@@ -2585,9 +2585,10 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 goto out;

 /* RACE: Check return value of inet_select_addr instead. */
- if (rcu_dereference(dev_out->ip_ptr) == NULL)
- goto out; /* Wrong error code */
-
+ if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
+ err = -ENETUNREACH;
+ goto out;
+ }
 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
 ipv4_is_lbcast(oldflp->fl4_dst)) {
 if (!fl.fl4_src)
@@ -2030,7 +2030,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 get_req:
 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
 }
- sk = sk_next(st->syn_wait_sk);
+ sk = sk_nulls_next(st->syn_wait_sk);
 st->state = TCP_SEQ_STATE_LISTENING;
 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 } else {

@@ -2039,7 +2039,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 if (reqsk_queue_len(&icsk->icsk_accept_queue))
 goto start_req;
 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
- sk = sk_next(sk);
+ sk = sk_nulls_next(sk);
 }
 get_sk:
 sk_nulls_for_each_from(sk, node) {
@@ -1899,6 +1899,7 @@ struct proto udp_prot = {
 .compat_setsockopt = compat_udp_setsockopt,
 .compat_getsockopt = compat_udp_getsockopt,
 #endif
+ .clear_sk = sk_prot_clear_portaddr_nulls,
 };
 EXPORT_SYMBOL(udp_prot);
@@ -57,6 +57,7 @@ struct proto udplite_prot = {
 .compat_setsockopt = compat_udp_setsockopt,
 .compat_getsockopt = compat_udp_getsockopt,
 #endif
+ .clear_sk = sk_prot_clear_portaddr_nulls,
 };
 EXPORT_SYMBOL(udplite_prot);
@@ -2669,7 +2669,9 @@ static int addrconf_ifdown(struct net_device *dev, int how)

 ASSERT_RTNL();

- rt6_ifdown(net, dev);
+ /* Flush routes if device is being removed or it is not loopback */
+ if (how || !(dev->flags & IFF_LOOPBACK))
+ rt6_ifdown(net, dev);
 neigh_ifdown(&nd_tbl, dev);

 idev = __in6_dev_get(dev);
@@ -56,7 +56,7 @@
 #include <net/checksum.h>
 #include <linux/mroute6.h>

- static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

 int __ip6_local_out(struct sk_buff *skb)
 {

@@ -145,14 +145,6 @@ static int ip6_finish_output2(struct sk_buff *skb)
 return -EINVAL;
 }

- static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
- {
- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
-
- return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
- }
-
 static int ip6_finish_output(struct sk_buff *skb)
 {
 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||

@@ -601,7 +593,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 return offset;
 }

- static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
 struct sk_buff *frag;
 struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
@@ -1565,11 +1565,16 @@ static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
 {
 struct rt6_info *rt, *nrt;
 int allfrag = 0;
-
+ again:
 rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
 if (rt == NULL)
 return;

+ if (rt6_check_expired(rt)) {
+ ip6_del_rt(rt);
+ goto again;
+ }
+
 if (pmtu >= dst_mtu(&rt->dst))
 goto out;
@@ -1477,6 +1477,7 @@ struct proto udpv6_prot = {
 .compat_setsockopt = compat_udpv6_setsockopt,
 .compat_getsockopt = compat_udpv6_getsockopt,
 #endif
+ .clear_sk = sk_prot_clear_portaddr_nulls,
 };

 static struct inet_protosw udpv6_protosw = {
@@ -55,6 +55,7 @@ struct proto udplitev6_prot = {
 .compat_setsockopt = compat_udpv6_setsockopt,
 .compat_getsockopt = compat_udpv6_getsockopt,
 #endif
+ .clear_sk = sk_prot_clear_portaddr_nulls,
 };

 static struct inet_protosw udplite6_protosw = {
@@ -17,6 +17,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <net/dst.h>
 #include <net/ipv6.h>
+ #include <net/ip6_route.h>
 #include <net/xfrm.h>

 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,

@@ -88,8 +89,21 @@ static int xfrm6_output_finish(struct sk_buff *skb)
 return xfrm_output(skb);
 }

+ static int __xfrm6_output(struct sk_buff *skb)
+ {
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x = dst->xfrm;
+
+ if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
+ ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+ dst_allfrag(skb_dst(skb)))) {
+ return ip6_fragment(skb, xfrm6_output_finish);
+ }
+ return xfrm6_output_finish(skb);
+ }
+
 int xfrm6_output(struct sk_buff *skb)
 {
 return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
- skb_dst(skb)->dev, xfrm6_output_finish);
+ skb_dst(skb)->dev, __xfrm6_output);
 }
@@ -2280,6 +2280,16 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,

 switch (optname) {
 case IRLMP_ENUMDEVICES:
+
+ /* Offset to first device entry */
+ offset = sizeof(struct irda_device_list) -
+ sizeof(struct irda_device_info);
+
+ if (len < offset) {
+ err = -EINVAL;
+ goto out;
+ }
+
 /* Ask lmp for the current discovery log */
 discoveries = irlmp_get_discoveries(&list.len, self->mask.word,
 self->nslots);

@@ -2290,15 +2300,9 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
 }

 /* Write total list length back to client */
- if (copy_to_user(optval, &list,
- sizeof(struct irda_device_list) -
- sizeof(struct irda_device_info)))
+ if (copy_to_user(optval, &list, offset))
 err = -EFAULT;

- /* Offset to first device entry */
- offset = sizeof(struct irda_device_list) -
- sizeof(struct irda_device_info);
-
 /* Copy the list itself - watch for overflow */
 if (list.len > 2048) {
 err = -EINVAL;
@@ -780,6 +780,9 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,

 mutex_lock(&sdata->u.ibss.mtx);

+ if (!sdata->u.ibss.ssid_len)
+ goto mgmt_out; /* not ready to merge yet */
+
 switch (fc & IEEE80211_FCTL_STYPE) {
 case IEEE80211_STYPE_PROBE_REQ:
 ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len);

@@ -797,6 +800,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 break;
 }

+ mgmt_out:
 mutex_unlock(&sdata->u.ibss.mtx);
 }
@@ -1788,9 +1788,11 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)

 fwd_skb = skb_copy(skb, GFP_ATOMIC);

- if (!fwd_skb && net_ratelimit())
+ if (!fwd_skb && net_ratelimit()) {
 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
 sdata->name);
+ goto out;
+ }

 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);

@@ -1828,6 +1830,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 }
 }

+ out:
 if (is_multicast_ether_addr(hdr->addr1) ||
 sdata->dev->flags & IFF_PROMISC)
 return RX_CONTINUE;
@@ -1051,11 +1051,13 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
 {
 struct ieee80211_local *local = sdata->local;
 struct ieee80211_work *wk;
+ bool cleanup = false;

 mutex_lock(&local->mtx);
 list_for_each_entry(wk, &local->work_list, list) {
 if (wk->sdata != sdata)
 continue;
+ cleanup = true;
 wk->type = IEEE80211_WORK_ABORT;
 wk->started = true;
 wk->timeout = jiffies;

@@ -1063,7 +1065,8 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
 mutex_unlock(&local->mtx);

 /* run cleanups etc. */
- ieee80211_work_work(&local->work_work);
+ if (cleanup)
+ ieee80211_work_work(&local->work_work);

 mutex_lock(&local->mtx);
 list_for_each_entry(wk, &local->work_list, list) {
@@ -270,7 +270,6 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
 d = q->next[q->tail];
 q->next[q->tail] = q->next[d];
- q->allot[q->next[d]] += q->quantum;
 skb = q->qs[d].prev;
 len = qdisc_pkt_len(skb);
 __skb_unlink(skb, &q->qs[d]);

@@ -321,14 +320,13 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 sfq_inc(q, x);
 if (q->qs[x].qlen == 1) { /* The flow is new */
 if (q->tail == SFQ_DEPTH) { /* It is the first flow */
- q->tail = x;
 q->next[x] = x;
- q->allot[x] = q->quantum;
 } else {
 q->next[x] = q->next[q->tail];
 q->next[q->tail] = x;
- q->tail = x;
 }
+ q->tail = x;
+ q->allot[x] = q->quantum;
 }
 if (++sch->q.qlen <= q->limit) {
 sch->bstats.bytes += qdisc_pkt_len(skb);

@@ -359,13 +357,13 @@ sfq_dequeue(struct Qdisc *sch)
 {
 struct sfq_sched_data *q = qdisc_priv(sch);
 struct sk_buff *skb;
- sfq_index a, old_a;
+ sfq_index a, next_a;

 /* No active slots */
 if (q->tail == SFQ_DEPTH)
 return NULL;

- a = old_a = q->next[q->tail];
+ a = q->next[q->tail];

 /* Grab packet */
 skb = __skb_dequeue(&q->qs[a]);

@@ -376,17 +374,15 @@ sfq_dequeue(struct Qdisc *sch)
 /* Is the slot empty? */
 if (q->qs[a].qlen == 0) {
 q->ht[q->hash[a]] = SFQ_DEPTH;
- a = q->next[a];
- if (a == old_a) {
+ next_a = q->next[a];
+ if (a == next_a) {
 q->tail = SFQ_DEPTH;
 return skb;
 }
- q->next[q->tail] = a;
- q->allot[a] += q->quantum;
+ q->next[q->tail] = next_a;
 } else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
- q->tail = a;
- a = q->next[a];
 q->allot[a] += q->quantum;
+ q->tail = a;
 }
 return skb;
 }
@@ -5053,7 +5053,7 @@ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
 if (copy_to_user(optval, &val, len))
 return -EFAULT;

- return -ENOTSUPP;
+ return 0;
 }

 /*