Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: Silence seq_scale() unused warning
  ipv4:correct description for tcp_max_syn_backlog
  pasemi_mac: Fix building as module
  netback: Fix alert message.
  r8169: fix Rx index race between FIFO overflow recovery and NAPI handler.
  r8169: Rx FIFO overflow fixes.
  ipv4: Fix peer validation on cached lookup.
  ipv4: make sure RTO_ONLINK is saved in routing cache
  iwlwifi: change the default behavior of watchdog timer
  iwlwifi: do not re-configure HT40 after associated
  iwlagn: fix HW crypto for TX-only keys
  Revert "mac80211: clear sta.drv_priv on reconfiguration"
  mac80211: fill rate filter for internal scan requests
  cfg80211: amend regulatory NULL dereference fix
  cfg80211: fix race on init and driver registration
commit b835c0f47f
Author: Linus Torvalds
Date:   2011-12-06 12:03:54 -08:00

19 changed files with 155 additions and 132 deletions


@@ -282,11 +282,11 @@ tcp_max_ssthresh - INTEGER
 	Default: 0 (off)
 
 tcp_max_syn_backlog - INTEGER
-	Maximal number of remembered connection requests, which are
-	still did not receive an acknowledgment from connecting client.
-	Default value is 1024 for systems with more than 128Mb of memory,
-	and 128 for low memory machines. If server suffers of overload,
-	try to increase this number.
+	Maximal number of remembered connection requests, which have not
+	received an acknowledgment from connecting client.
+	The minimal value is 128 for low memory machines, and it will
+	increase in proportion to the memory of machine.
+	If server suffers from overload, try increasing this number.
 
 tcp_max_tw_buckets - INTEGER
 	Maximal number of timewait sockets held by system simultaneously.
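For reference only (not part of the patch): the limit documented above is exposed through procfs, so it can be inspected or raised at runtime. A minimal userspace sketch in C, assuming the standard sysctl path:

	/* Sketch, not from this commit: read the current backlog limit. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/tcp_max_syn_backlog", "r");
		int backlog;

		if (!f)
			return 1;
		if (fscanf(f, "%d", &backlog) == 1)
			printf("tcp_max_syn_backlog = %d\n", backlog);
		fclose(f);
		return 0;
	}

Writing a larger value to the same file raises the limit, as the documentation above suggests for overloaded servers.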


@@ -2,4 +2,5 @@
 # Makefile for the A Semi network device drivers.
 #
 
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o


@@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
 	return value;
 }
 
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+
 	RTL_W16(IntrMask, 0x0000);
-	RTL_W16(IntrStatus, 0xffff);
+	RTL_W16(IntrStatus, tp->intr_event);
+	RTL_R8(ChipCmd);
 }
 
 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -3933,8 +3935,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
 			break;
 		udelay(100);
 	}
-
-	rtl8169_init_ring_indexes(tp);
 }
 
 static int __devinit
@@ -4339,7 +4339,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
 	void __iomem *ioaddr = tp->mmio_addr;
 
 	/* Disable interrupts */
-	rtl8169_irq_mask_and_ack(ioaddr);
+	rtl8169_irq_mask_and_ack(tp);
 
 	rtl_rx_close(tp);
@@ -4885,8 +4885,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
 	RTL_W16(IntrMitigate, 0x5151);
 
 	/* Work around for RxFIFO overflow. */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
-	    tp->mac_version == RTL_GIGA_MAC_VER_22) {
+	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
 		tp->intr_event |= RxFIFOOver | PCSTimeout;
 		tp->intr_event &= ~RxOverflow;
 	}
@@ -5076,6 +5075,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
 	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
 
+	if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+		tp->intr_event &= ~RxFIFOOver;
+		tp->napi_event &= ~RxFIFOOver;
+	}
+
 	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
 	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
 		int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5346,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
 	/* Wait for any pending NAPI task to complete */
 	napi_disable(&tp->napi);
 
-	rtl8169_irq_mask_and_ack(ioaddr);
+	rtl8169_irq_mask_and_ack(tp);
 
 	tp->intr_mask = 0xffff;
 	RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5393,16 @@ static void rtl8169_reset_task(struct work_struct *work)
 	if (!netif_running(dev))
 		goto out_unlock;
 
+	rtl8169_hw_reset(tp);
+
 	rtl8169_wait_for_quiescence(dev);
 
 	for (i = 0; i < NUM_RX_DESC; i++)
 		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
 
 	rtl8169_tx_clear(tp);
+	rtl8169_init_ring_indexes(tp);
 
-	rtl8169_hw_reset(tp);
 	rtl_hw_start(dev);
 	netif_wake_queue(dev);
 	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5413,6 @@ static void rtl8169_reset_task(struct work_struct *work)
 
 static void rtl8169_tx_timeout(struct net_device *dev)
 {
-	struct rtl8169_private *tp = netdev_priv(dev);
-
-	rtl8169_hw_reset(tp);
-
-	/* Let's wait a bit while any (async) irq lands on */
 	rtl8169_schedule_work(dev, rtl8169_reset_task);
 }
@@ -5804,6 +5805,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 	 */
 	status = RTL_R16(IntrStatus);
 	while (status && status != 0xffff) {
+		status &= tp->intr_event;
+		if (!status)
+			break;
+
 		handled = 1;
 
 		/* Handle all of the error cases first. These will reset
@@ -5818,27 +5823,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 			switch (tp->mac_version) {
 			/* Work around for rx fifo overflow */
 			case RTL_GIGA_MAC_VER_11:
-			case RTL_GIGA_MAC_VER_22:
-			case RTL_GIGA_MAC_VER_26:
 				netif_stop_queue(dev);
 				rtl8169_tx_timeout(dev);
 				goto done;
-			/* Testers needed. */
-			case RTL_GIGA_MAC_VER_17:
-			case RTL_GIGA_MAC_VER_19:
-			case RTL_GIGA_MAC_VER_20:
-			case RTL_GIGA_MAC_VER_21:
-			case RTL_GIGA_MAC_VER_23:
-			case RTL_GIGA_MAC_VER_24:
-			case RTL_GIGA_MAC_VER_27:
-			case RTL_GIGA_MAC_VER_28:
-			case RTL_GIGA_MAC_VER_31:
-			/* Experimental science. Pktgen proof. */
-			case RTL_GIGA_MAC_VER_12:
-			case RTL_GIGA_MAC_VER_25:
-				if (status == RxFIFOOver)
-					goto done;
-				break;
 			default:
 				break;
 			}


@@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = {
 	.chain_noise_scale = 1000,
 	.wd_timeout = IWL_DEF_WD_TIMEOUT,
 	.max_event_log_size = 128,
+	.wd_disable = true,
 };
 static struct iwl_ht_params iwl1000_ht_params = {
 	.ht_greenfield_support = true,


@@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = {
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
 	.max_event_log_size = 512,
 	.no_idle_support = true,
+	.wd_disable = true,
 };
 static struct iwl_ht_params iwl5000_ht_params = {
 	.ht_greenfield_support = true,


@@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 	return 0;
 }
 
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+			struct iwl_rxon_context *ctx)
+{
+	if (conf_is_ht40_minus(conf)) {
+		ctx->ht.extension_chan_offset =
+			IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+		ctx->ht.is_40mhz = true;
+	} else if (conf_is_ht40_plus(conf)) {
+		ctx->ht.extension_chan_offset =
+			IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+		ctx->ht.is_40mhz = true;
+	} else {
+		ctx->ht.extension_chan_offset =
+			IEEE80211_HT_PARAM_CHA_SEC_NONE;
+		ctx->ht.is_40mhz = false;
+	}
+}
+
 int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct iwl_priv *priv = hw->priv;
@@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 		ctx->ht.enabled = conf_is_ht(conf);
 
 		if (ctx->ht.enabled) {
-			if (conf_is_ht40_minus(conf)) {
-				ctx->ht.extension_chan_offset =
-					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-				ctx->ht.is_40mhz = true;
-			} else if (conf_is_ht40_plus(conf)) {
-				ctx->ht.extension_chan_offset =
-					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-				ctx->ht.is_40mhz = true;
-			} else {
-				ctx->ht.extension_chan_offset =
-					IEEE80211_HT_PARAM_CHA_SEC_NONE;
-				ctx->ht.is_40mhz = false;
-			}
+			/* if HT40 is used, it should not change
+			 * after associated except channel switch */
+			if (iwl_is_associated_ctx(ctx) &&
+			    !ctx->ht.is_40mhz)
+				iwlagn_config_ht40(conf, ctx);
 		} else
 			ctx->ht.is_40mhz = false;


@@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
 	switch (keyconf->cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
-		keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-		keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
 		if (sta)
 			addr = sta->addr;
 		else /* station mode case only */
@@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
 					  seq.tkip.iv32, p1k, CMD_SYNC);
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
-		keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-		/* fall through */
 	case WLAN_CIPHER_SUITE_WEP40:
 	case WLAN_CIPHER_SUITE_WEP104:
 		ret = iwlagn_send_sta_key(priv, keyconf, sta_id,


@@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		return -EOPNOTSUPP;
 	}
 
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_CCMP:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+		break;
+	default:
+		break;
+	}
+
 	/*
 	 * We could program these keys into the hardware as well, but we
 	 * don't expect much multicast traffic in IBSS and having keys
@@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
 	/* Configure HT40 channels */
 	ctx->ht.enabled = conf_is_ht(conf);
-	if (ctx->ht.enabled) {
-		if (conf_is_ht40_minus(conf)) {
-			ctx->ht.extension_chan_offset =
-				IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-			ctx->ht.is_40mhz = true;
-		} else if (conf_is_ht40_plus(conf)) {
-			ctx->ht.extension_chan_offset =
-				IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-			ctx->ht.is_40mhz = true;
-		} else {
-			ctx->ht.extension_chan_offset =
-				IEEE80211_HT_PARAM_CHA_SEC_NONE;
-			ctx->ht.is_40mhz = false;
-		}
-	} else
+	if (ctx->ht.enabled)
+		iwlagn_config_ht40(conf, ctx);
+	else
 		ctx->ht.is_40mhz = false;
 
 	if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -3499,9 +3498,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
 module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
 MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
 
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
 MODULE_PARM_DESC(wd_disable,
-		"Disable stuck queue watchdog timer (default: 0 [enabled])");
+		"Disable stuck queue watchdog timer 0=system default, "
+		"1=disable, 2=enable (default: 0)");
 
 /*
  * set bt_coex_active to true, uCode will do kill/defer


@@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 			     struct ieee80211_vif *vif,
 			     struct ieee80211_bss_conf *bss_conf,
 			     u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+			struct iwl_rxon_context *ctx);
 
 /* uCode */
 int iwlagn_rx_calib_result(struct iwl_priv *priv,


@@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
 {
 	unsigned int timeout = priv->cfg->base_params->wd_timeout;
 
-	if (timeout && !iwlagn_mod_params.wd_disable)
-		mod_timer(&priv->watchdog,
-			  jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
-	else
-		del_timer(&priv->watchdog);
+	if (!iwlagn_mod_params.wd_disable) {
+		/* use system default */
+		if (timeout && !priv->cfg->base_params->wd_disable)
+			mod_timer(&priv->watchdog,
+				  jiffies +
+				  msecs_to_jiffies(IWL_WD_TICK(timeout)));
+		else
+			del_timer(&priv->watchdog);
+	} else {
+		/* module parameter overwrite default configuration */
+		if (timeout && iwlagn_mod_params.wd_disable == 2)
+			mod_timer(&priv->watchdog,
+				  jiffies +
+				  msecs_to_jiffies(IWL_WD_TICK(timeout)));
+		else
+			del_timer(&priv->watchdog);
+	}
 }
 
 /**
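Paraphrasing the behaviour introduced above as a stand-alone sketch (the helper name and enum below are illustrative, not the driver's own): wd_disable == 0 follows the per-device default in iwl_base_params, 1 forces the watchdog off, and 2 forces it on.

	/* Illustrative sketch of the watchdog decision above; names are invented. */
	#include <stdbool.h>

	enum { WD_SYSTEM_DEFAULT = 0, WD_FORCE_DISABLE = 1, WD_FORCE_ENABLE = 2 };

	static bool watchdog_should_run(unsigned int timeout,
					bool per_device_wd_disable,
					int wd_disable_param)
	{
		if (!timeout)
			return false;
		if (wd_disable_param == WD_SYSTEM_DEFAULT)
			return !per_device_wd_disable;	/* honour the device default */
		return wd_disable_param == WD_FORCE_ENABLE;
	}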


@@ -113,6 +113,7 @@ struct iwl_lib_ops {
  * @shadow_reg_enable: HW shadhow register bit
  * @no_idle_support: do not support idle mode
  * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * wd_disable: disable watchdog timer
  */
 struct iwl_base_params {
 	int eeprom_size;
@@ -134,6 +135,7 @@
 	const bool shadow_reg_enable;
 	const bool no_idle_support;
 	const bool hd_v2;
+	const bool wd_disable;
 };
 /*
  * @advanced_bt_coexist: support advanced bt coexist


@@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
 * @restart_fw: restart firmware, default = 1
 * @plcp_check: enable plcp health check, default = true
 * @ack_check: disable ack health check, default = false
-* @wd_disable: enable stuck queue check, default = false
+* @wd_disable: enable stuck queue check, default = 0
 * @bt_coex_active: enable bt coex, default = true
 * @led_mode: system default, default = 0
 * @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +141,7 @@ struct iwl_mod_params {
 	int restart_fw;
 	bool plcp_check;
 	bool ack_check;
-	bool wd_disable;
+	int wd_disable;
 	bool bt_coex_active;
 	int led_mode;
 	bool no_sleep_autoadjust;


@@ -1668,7 +1668,7 @@ static int __init netback_init(void)
 					      "netback/%u", group);
 
 		if (IS_ERR(netbk->task)) {
-			printk(KERN_ALERT "kthread_run() fails at netback\n");
+			printk(KERN_ALERT "kthread_create() fails at netback\n");
 			del_timer(&netbk->net_timer);
 			rc = PTR_ERR(netbk->task);
 			goto failed_init;


@@ -26,10 +26,11 @@
  * but then some measure against one socket starving all other sockets
  * would be needed.
  *
- * It was 128 by default. Experiments with real servers show, that
+ * The minimum value of it is 128. Experiments with real servers show that
  * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low memory machines,
+ * and it will increase in proportion to the memory of machine.
  * Note : Dont forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;


@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
 }
 late_initcall(net_secret_init);
 
+#ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
 {
 	/*
@@ -33,6 +34,7 @@ static u32 seq_scale(u32 seq)
 	 */
 	return seq + (ktime_to_ns(ktime_get_real()) >> 6);
 }
+#endif
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,


@@ -112,7 +112,7 @@
 #include <net/secure_seq.h>
 
 #define RT_FL_TOS(oldflp4) \
-	((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 
 #define IP_MAX_MTU	0xFFF0
@@ -1310,7 +1310,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
 {
 	struct rtable *rt = (struct rtable *) dst;
 	__be32 orig_gw = rt->rt_gateway;
@@ -1321,21 +1321,19 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
 	rt->rt_gateway = peer->redirect_learned.a4;
 
 	n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-	if (IS_ERR(n))
-		return PTR_ERR(n);
+	if (IS_ERR(n)) {
+		rt->rt_gateway = orig_gw;
+		return;
+	}
 	old_n = xchg(&rt->dst._neighbour, n);
 	if (old_n)
 		neigh_release(old_n);
-	if (!n || !(n->nud_state & NUD_VALID)) {
-		if (n)
-			neigh_event_send(n, NULL);
-		rt->rt_gateway = orig_gw;
-		return -EAGAIN;
+	if (!(n->nud_state & NUD_VALID)) {
+		neigh_event_send(n, NULL);
 	} else {
 		rt->rt_flags |= RTCF_REDIRECTED;
 		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
 	}
-	return 0;
 }
 
 /* called in rcu_read_lock() section */
@@ -1693,7 +1691,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 }
 
-static struct rtable *ipv4_validate_peer(struct rtable *rt)
+static void ipv4_validate_peer(struct rtable *rt)
 {
 	if (rt->rt_peer_genid != rt_peer_genid()) {
 		struct inet_peer *peer;
@@ -1708,15 +1706,12 @@ static struct rtable *ipv4_validate_peer(struct rtable *rt)
 			if (peer->redirect_genid != redirect_genid)
 				peer->redirect_learned.a4 = 0;
 			if (peer->redirect_learned.a4 &&
-			    peer->redirect_learned.a4 != rt->rt_gateway) {
-				if (check_peer_redir(&rt->dst, peer))
-					return NULL;
-			}
+			    peer->redirect_learned.a4 != rt->rt_gateway)
+				check_peer_redir(&rt->dst, peer);
 		}
 
 		rt->rt_peer_genid = rt_peer_genid();
 	}
-	return rt;
 }
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
@@ -1725,7 +1720,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 	if (rt_is_expired(rt))
 		return NULL;
-	dst = (struct dst_entry *) ipv4_validate_peer(rt);
+	ipv4_validate_peer(rt);
 	return dst;
 }
@@ -2380,9 +2375,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		    rth->rt_mark == skb->mark &&
 		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
-			rth = ipv4_validate_peer(rth);
-			if (!rth)
-				continue;
+			ipv4_validate_peer(rth);
 			if (noref) {
 				dst_use_noref(&rth->dst, jiffies);
 				skb_dst_set_noref(skb, &rth->dst);
@@ -2441,11 +2434,11 @@ EXPORT_SYMBOL(ip_route_input_common);
 static struct rtable *__mkroute_output(const struct fib_result *res,
 				       const struct flowi4 *fl4,
 				       __be32 orig_daddr, __be32 orig_saddr,
-				       int orig_oif, struct net_device *dev_out,
+				       int orig_oif, __u8 orig_rtos,
+				       struct net_device *dev_out,
 				       unsigned int flags)
 {
 	struct fib_info *fi = res->fi;
-	u32 tos = RT_FL_TOS(fl4);
 	struct in_device *in_dev;
 	u16 type = res->type;
 	struct rtable *rth;
@@ -2496,7 +2489,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 	rth->rt_genid = rt_genid(dev_net(dev_out));
 	rth->rt_flags = flags;
 	rth->rt_type = type;
-	rth->rt_key_tos = tos;
+	rth->rt_key_tos = orig_rtos;
 	rth->rt_dst = fl4->daddr;
 	rth->rt_src = fl4->saddr;
 	rth->rt_route_iif = 0;
@@ -2546,7 +2539,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 {
 	struct net_device *dev_out = NULL;
-	u32 tos = RT_FL_TOS(fl4);
+	__u8 tos = RT_FL_TOS(fl4);
 	unsigned int flags = 0;
 	struct fib_result res;
 	struct rtable *rth;
@@ -2722,7 +2715,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 make_route:
 	rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
-			       dev_out, flags);
+			       tos, dev_out, flags);
 	if (!IS_ERR(rth)) {
 		unsigned int hash;
@@ -2758,9 +2751,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
 		    (IPTOS_RT_MASK | RTO_ONLINK)) &&
 		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
-			rth = ipv4_validate_peer(rth);
-			if (!rth)
-				continue;
+			ipv4_validate_peer(rth);
 			dst_use(&rth->dst, jiffies);
 			RT_CACHE_STAT_INC(out_hit);
 			rcu_read_unlock_bh();


@@ -757,6 +757,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	if (!local->int_scan_req)
 		return -ENOMEM;
 
+	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		if (!local->hw.wiphy->bands[band])
+			continue;
+		local->int_scan_req->rates[band] = (u32) -1;
+	}
+
 	/* if low-level driver supports AP, we also support VLAN */
 	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
 		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);


@@ -1039,7 +1039,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 						struct ieee80211_sub_if_data,
 						u.ap);
 
-			memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
 			WARN_ON(drv_sta_add(local, sdata, &sta->sta));
 		}
 	}


@@ -57,8 +57,17 @@
 #define REG_DBG_PRINT(args...)
 #endif
 
+static struct regulatory_request core_request_world = {
+	.initiator = NL80211_REGDOM_SET_BY_CORE,
+	.alpha2[0] = '0',
+	.alpha2[1] = '0',
+	.intersect = false,
+	.processed = true,
+	.country_ie_env = ENVIRON_ANY,
+};
+
 /* Receipt of information from last regulatory request */
-static struct regulatory_request *last_request;
+static struct regulatory_request *last_request = &core_request_world;
 
 /* To trigger userspace events */
 static struct platform_device *reg_pdev;
@@ -150,7 +159,7 @@ static char user_alpha2[2];
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-static void reset_regdomains(void)
+static void reset_regdomains(bool full_reset)
 {
 	/* avoid freeing static information or freeing something twice */
 	if (cfg80211_regdomain == cfg80211_world_regdom)
@@ -165,6 +174,13 @@ static void reset_regdomains(void)
 	cfg80211_world_regdom = &world_regdom;
 	cfg80211_regdomain = NULL;
+
+	if (!full_reset)
+		return;
+
+	if (last_request != &core_request_world)
+		kfree(last_request);
+	last_request = &core_request_world;
 }
 
 /*
@@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
 {
 	BUG_ON(!last_request);
 
-	reset_regdomains();
+	reset_regdomains(false);
 
 	cfg80211_world_regdom = rd;
 	cfg80211_regdomain = rd;
@@ -1407,7 +1423,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
 	}
 
 new_request:
-	kfree(last_request);
+	if (last_request != &core_request_world)
+		kfree(last_request);
 
 	last_request = pending_request;
 	last_request->intersect = intersect;
@@ -1577,9 +1594,6 @@ static int regulatory_hint_core(const char *alpha2)
 {
 	struct regulatory_request *request;
 
-	kfree(last_request);
-	last_request = NULL;
-
 	request = kzalloc(sizeof(struct regulatory_request),
 			  GFP_KERNEL);
 	if (!request)
@@ -1777,7 +1791,7 @@ static void restore_regulatory_settings(bool reset_user)
 	mutex_lock(&cfg80211_mutex);
 	mutex_lock(&reg_mutex);
 
-	reset_regdomains();
+	reset_regdomains(true);
 	restore_alpha2(alpha2, reset_user);
 
 	/*
@@ -2037,8 +2051,10 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
 	}
 
 	request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
-	if (!request_wiphy) {
-		reg_set_request_processed();
+	if (!request_wiphy &&
+	    (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
+	     last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+		schedule_delayed_work(&reg_timeout, 0);
 		return -ENODEV;
 	}
@@ -2046,7 +2062,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
 		int r;
 
 		if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
-			reset_regdomains();
+			reset_regdomains(false);
 			cfg80211_regdomain = rd;
 			return 0;
 		}
@@ -2067,7 +2083,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
 		if (r)
 			return r;
 
-		reset_regdomains();
+		reset_regdomains(false);
 		cfg80211_regdomain = rd;
 		return 0;
 	}
@@ -2092,7 +2108,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
 		rd = NULL;
 
-		reset_regdomains();
+		reset_regdomains(false);
 		cfg80211_regdomain = intersected_rd;
 
 		return 0;
@@ -2112,7 +2128,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
 	kfree(rd);
 	rd = NULL;
 
-	reset_regdomains();
+	reset_regdomains(false);
 	cfg80211_regdomain = intersected_rd;
 
 	return 0;
@@ -2265,11 +2281,8 @@ void /* __init_or_exit */ regulatory_exit(void)
 	mutex_lock(&cfg80211_mutex);
 	mutex_lock(&reg_mutex);
 
-	reset_regdomains();
-
-	kfree(last_request);
-	last_request = NULL;
+	reset_regdomains(true);
 
 	dev_set_uevent_suppress(&reg_pdev->dev, true);
 
 	platform_device_unregister(reg_pdev);