Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (55 commits)
  ISDN, hfcsusb: Don't leak in hfcsusb_ph_info()
  netpoll: call dev_put() on error in netpoll_setup()
  net: ep93xx_eth: fix DMA API violations
  net: ep93xx_eth: drop GFP_DMA from call to dma_alloc_coherent()
  net: ep93xx_eth: allocate buffers using kmalloc()
  net: ep93xx_eth: pass struct device to DMA API functions
  ep93xx: set DMA masks for the ep93xx_eth
  vlan: Fix the ingress VLAN_FLAG_REORDER_HDR check
  dl2k: EEPROM CRC calculation wrong endianness on big-endian machine
  NET: am79c961: fix assembler warnings
  NET: am79c961: ensure multicast filter is correctly set at open
  NET: am79c961: ensure asm() statements are marked volatile
  ethtool.h: fix typos
  ep93xx_eth: Update MAINTAINERS
  ipv4: Fix packet size calculation for raw IPsec packets in __ip_append_data
  netpoll: prevent netpoll setup on slave devices
  net: pmtu_expires fixes
  gianfar: localized filer table
  iwlegacy: fix channel switch locking
  mac80211: fix IBSS teardown race
  ...
Linus Torvalds 2011-06-12 11:03:08 -07:00
commit 152b92db7a
87 changed files with 553 additions and 545 deletions


@ -1739,7 +1739,7 @@ S: Supported
F: drivers/net/enic/
CIRRUS LOGIC EP93XX ETHERNET DRIVER
M: Lennert Buytenhek <kernel@wantstofly.org>
M: Hartley Sweeten <hsweeten@visionengravers.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/arm/ep93xx_eth.c


@ -402,11 +402,15 @@ static struct resource ep93xx_eth_resource[] = {
}
};
static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32);
static struct platform_device ep93xx_eth_device = {
.name = "ep93xx-eth",
.id = -1,
.dev = {
.platform_data = &ep93xx_eth_data,
.platform_data = &ep93xx_eth_data,
.coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &ep93xx_eth_dma_mask,
},
.num_resources = ARRAY_SIZE(ep93xx_eth_resource),
.resource = ep93xx_eth_resource,
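This hunk is the stock idiom for giving a platform device a DMA identity: dev.dma_mask is a pointer, so it needs backing storage with static lifetime, while coherent_dma_mask is held by value. A minimal sketch of the same pattern (the demo_* names are illustrative, not from the tree):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static u64 demo_dma_mask = DMA_BIT_MASK(32);

static struct platform_device demo_device = {
	.name	= "demo",
	.id	= -1,
	.dev	= {
		.dma_mask          = &demo_dma_mask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

Without both masks set, dma_map_single() and dma_alloc_coherent() against the device fail, which is what the ep93xx_eth DMA fixes later in this merge depend on.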


@ -283,6 +283,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
sizeof(struct ph_info_dch) + dch->dev.nrbchan *
sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
kfree(phi);
}
/*


@ -50,7 +50,7 @@ static const char version[] =
#ifdef __arm__
static void write_rreg(u_long base, u_int reg, u_int val)
{
__asm__(
asm volatile(
"str%?h %1, [%2] @ NET_RAP\n\t"
"str%?h %0, [%2, #-4] @ NET_RDP"
:
@ -60,7 +60,7 @@ static void write_rreg(u_long base, u_int reg, u_int val)
static inline unsigned short read_rreg(u_long base_addr, u_int reg)
{
unsigned short v;
__asm__(
asm volatile(
"str%?h %1, [%2] @ NET_RAP\n\t"
"ldr%?h %0, [%2, #-4] @ NET_RDP"
: "=r" (v)
@ -70,7 +70,7 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
static inline void write_ireg(u_long base, u_int reg, u_int val)
{
__asm__(
asm volatile(
"str%?h %1, [%2] @ NET_RAP\n\t"
"str%?h %0, [%2, #8] @ NET_IDP"
:
@ -80,7 +80,7 @@ static inline void write_ireg(u_long base, u_int reg, u_int val)
static inline unsigned short read_ireg(u_long base_addr, u_int reg)
{
u_short v;
__asm__(
asm volatile(
"str%?h %1, [%2] @ NAT_RAP\n\t"
"ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
: "=r" (v)
@ -91,47 +91,48 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
static inline void
static void
am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
{
offset = ISAMEM_BASE + (offset << 1);
length = (length + 1) & ~1;
if ((int)buf & 2) {
__asm__ __volatile__("str%?h %2, [%0], #4"
asm volatile("str%?h %2, [%0], #4"
: "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
buf += 2;
length -= 2;
}
while (length > 8) {
unsigned int tmp, tmp2;
__asm__ __volatile__(
"ldm%?ia %1!, {%2, %3}\n\t"
register unsigned int tmp asm("r2"), tmp2 asm("r3");
asm volatile(
"ldm%?ia %0!, {%1, %2}"
: "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
length -= 8;
asm volatile(
"str%?h %1, [%0], #4\n\t"
"mov%? %1, %1, lsr #16\n\t"
"str%?h %1, [%0], #4\n\t"
"str%?h %2, [%0], #4\n\t"
"mov%? %2, %2, lsr #16\n\t"
"str%?h %2, [%0], #4\n\t"
"str%?h %3, [%0], #4\n\t"
"mov%? %3, %3, lsr #16\n\t"
"str%?h %3, [%0], #4"
: "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2)
: "0" (offset), "1" (buf));
length -= 8;
"str%?h %2, [%0], #4"
: "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
}
while (length > 0) {
__asm__ __volatile__("str%?h %2, [%0], #4"
asm volatile("str%?h %2, [%0], #4"
: "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
buf += 2;
length -= 2;
}
}
static inline void
static void
am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
{
offset = ISAMEM_BASE + (offset << 1);
length = (length + 1) & ~1;
if ((int)buf & 2) {
unsigned int tmp;
__asm__ __volatile__(
asm volatile(
"ldr%?h %2, [%0], #4\n\t"
"str%?b %2, [%1], #1\n\t"
"mov%? %2, %2, lsr #8\n\t"
@ -140,12 +141,12 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
length -= 2;
}
while (length > 8) {
unsigned int tmp, tmp2, tmp3;
__asm__ __volatile__(
register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
asm volatile(
"ldr%?h %2, [%0], #4\n\t"
"ldr%?h %4, [%0], #4\n\t"
"ldr%?h %3, [%0], #4\n\t"
"orr%? %2, %2, %3, lsl #16\n\t"
"ldr%?h %3, [%0], #4\n\t"
"orr%? %2, %2, %4, lsl #16\n\t"
"ldr%?h %4, [%0], #4\n\t"
"orr%? %3, %3, %4, lsl #16\n\t"
"stm%?ia %1!, {%2, %3}"
@ -155,7 +156,7 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
}
while (length > 0) {
unsigned int tmp;
__asm__ __volatile__(
asm volatile(
"ldr%?h %2, [%0], #4\n\t"
"str%?b %2, [%1], #1\n\t"
"mov%? %2, %2, lsr #8\n\t"
@ -196,6 +197,42 @@ am79c961_ramtest(struct net_device *dev, unsigned int val)
return errorcount;
}
static void am79c961_mc_hash(char *addr, u16 *hash)
{
if (addr[0] & 0x01) {
int idx, bit;
u32 crc;
crc = ether_crc_le(ETH_ALEN, addr);
idx = crc >> 30;
bit = (crc >> 26) & 15;
hash[idx] |= 1 << bit;
}
}
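/*
 * Worked example of the hash above (the CRC value is invented purely for
 * illustration). If ether_crc_le(ETH_ALEN, addr) returned 0xB1C2D3E4:
 *   idx = 0xB1C2D3E4 >> 30        = 2   (top two bits pick the u16 word)
 *   bit = (0xB1C2D3E4 >> 26) & 15 = 12  (next four bits pick the bit)
 * so hash[2] |= 1 << 12, i.e. logical bit 44 of the 64-bit LADR filter.
 */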
static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
{
unsigned int mode = MODE_PORT_10BT;
if (dev->flags & IFF_PROMISC) {
mode |= MODE_PROMISC;
memset(hash, 0xff, 4 * sizeof(*hash));
} else if (dev->flags & IFF_ALLMULTI) {
memset(hash, 0xff, 4 * sizeof(*hash));
} else {
struct netdev_hw_addr *ha;
memset(hash, 0, 4 * sizeof(*hash));
netdev_for_each_mc_addr(ha, dev)
am79c961_mc_hash(ha->addr, hash);
}
return mode;
}
static void
am79c961_init_for_open(struct net_device *dev)
{
@ -203,6 +240,7 @@ am79c961_init_for_open(struct net_device *dev)
unsigned long flags;
unsigned char *p;
u_int hdr_addr, first_free_addr;
u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
int i;
/*
@ -218,16 +256,12 @@ am79c961_init_for_open(struct net_device *dev)
write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
for (i = LADRL; i <= LADRH; i++)
write_rreg (dev->base_addr, i, 0);
write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
i = MODE_PORT_10BT;
if (dev->flags & IFF_PROMISC)
i |= MODE_PROMISC;
write_rreg (dev->base_addr, MODE, i);
write_rreg (dev->base_addr, MODE, mode);
write_rreg (dev->base_addr, POLLINT, 0);
write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
@ -340,21 +374,6 @@ am79c961_close(struct net_device *dev)
return 0;
}
static void am79c961_mc_hash(char *addr, unsigned short *hash)
{
if (addr[0] & 0x01) {
int idx, bit;
u32 crc;
crc = ether_crc_le(ETH_ALEN, addr);
idx = crc >> 30;
bit = (crc >> 26) & 15;
hash[idx] |= 1 << bit;
}
}
/*
* Set or clear promiscuous/multicast mode filter for this adapter.
*/
@ -362,24 +381,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
{
struct dev_priv *priv = netdev_priv(dev);
unsigned long flags;
unsigned short multi_hash[4], mode;
u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
int i, stopped;
mode = MODE_PORT_10BT;
if (dev->flags & IFF_PROMISC) {
mode |= MODE_PROMISC;
} else if (dev->flags & IFF_ALLMULTI) {
memset(multi_hash, 0xff, sizeof(multi_hash));
} else {
struct netdev_hw_addr *ha;
memset(multi_hash, 0x00, sizeof(multi_hash));
netdev_for_each_mc_addr(ha, dev)
am79c961_mc_hash(ha->addr, multi_hash);
}
spin_lock_irqsave(&priv->chip_lock, flags);
stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
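The volatile markings are the substance of the am79c961 fixes above: each asm touches a device register, so it must execute exactly as written, yet GCC may delete an asm whose outputs go unused, or fold two identical ones into a single access, unless the asm is declared volatile. A minimal sketch of the failure mode (illustrative, not the driver's code):

static inline unsigned short mmio_read16(unsigned long addr)
{
	unsigned short v;

	/* without "volatile", two back-to-back reads can be folded into one */
	asm volatile("ldrh %0, [%1]" : "=r" (v) : "r" (addr) : "memory");
	return v;
}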


@ -283,10 +283,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
skb = dev_alloc_skb(length + 2);
if (likely(skb != NULL)) {
struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
skb_reserve(skb, 2);
dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
length, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
dma_sync_single_for_device(dev->dev.parent,
rxd->buf_addr, length,
DMA_FROM_DEVICE);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
@ -348,6 +352,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ep93xx_priv *ep = netdev_priv(dev);
struct ep93xx_tdesc *txd;
int entry;
if (unlikely(skb->len > MAX_PKT_SIZE)) {
@ -359,11 +364,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
entry = ep->tx_pointer;
ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
ep->descs->tdesc[entry].tdesc1 =
TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
txd = &ep->descs->tdesc[entry];
txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
DMA_TO_DEVICE);
skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
skb->len, DMA_TO_DEVICE);
dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
DMA_TO_DEVICE);
dev_kfree_skb(skb);
spin_lock_irq(&ep->tx_pending_lock);
@ -457,89 +465,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
static void ep93xx_free_buffers(struct ep93xx_priv *ep)
{
struct device *dev = ep->dev->dev.parent;
int i;
for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
dma_addr_t d;
d = ep->descs->rdesc[i].buf_addr;
if (d)
dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
if (ep->rx_buf[i] != NULL)
free_page((unsigned long)ep->rx_buf[i]);
kfree(ep->rx_buf[i]);
}
for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
dma_addr_t d;
d = ep->descs->tdesc[i].buf_addr;
if (d)
dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
if (ep->tx_buf[i] != NULL)
free_page((unsigned long)ep->tx_buf[i]);
kfree(ep->tx_buf[i]);
}
dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
ep->descs_dma_addr);
}
/*
* The hardware enforces a sub-2K maximum packet size, so we put
* two buffers on every hardware page.
*/
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
struct device *dev = ep->dev->dev.parent;
int i;
ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
&ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
&ep->descs_dma_addr, GFP_KERNEL);
if (ep->descs == NULL)
return 1;
for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
void *page;
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
void *buf;
dma_addr_t d;
page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (page == NULL)
buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
if (buf == NULL)
goto err;
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(NULL, d)) {
free_page((unsigned long)page);
d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, d)) {
kfree(buf);
goto err;
}
ep->rx_buf[i] = page;
ep->rx_buf[i] = buf;
ep->descs->rdesc[i].buf_addr = d;
ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
}
for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
void *page;
for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
void *buf;
dma_addr_t d;
page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (page == NULL)
buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
if (buf == NULL)
goto err;
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(NULL, d)) {
free_page((unsigned long)page);
d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, d)) {
kfree(buf);
goto err;
}
ep->tx_buf[i] = page;
ep->tx_buf[i] = buf;
ep->descs->tdesc[i].buf_addr = d;
ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
}
return 0;
@ -829,6 +828,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
}
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
platform_set_drvdata(pdev, dev);
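The RX and TX paths above now follow the core streaming-DMA rule: between sync calls the buffer belongs to the device, so every CPU access is bracketed by dma_sync_single_for_cpu()/dma_sync_single_for_device(), and every DMA API call names the real struct device instead of NULL. The bracket, condensed into a sketch (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static void demo_rx_copy(struct device *dev, dma_addr_t handle,
			 void *buf, struct sk_buff *skb, int len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);	/* CPU owns the buffer */
	skb_copy_to_linear_data(skb, buf, len);				/* safe to read now */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);	/* device owns it again */
}

Passing NULL as the device, as the old code did, skips the architecture's cache maintenance and bounce-buffer handling entirely.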


@ -388,6 +388,8 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
return next;
}
#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@ -400,6 +402,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
{
skb->dev = slave_dev;
skb->priority = 1;
skb->queue_mapping = bond_queue_mapping(skb);
if (unlikely(netpoll_tx_running(slave_dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
else
@ -4206,6 +4211,7 @@ static inline int bond_slave_override(struct bonding *bond,
return res;
}
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
{
/*
@ -4216,6 +4222,11 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
*/
u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
/*
* Save the original txq to restore before passing to the driver
*/
bond_queue_mapping(skb) = skb->queue_mapping;
if (unlikely(txq >= dev->real_num_tx_queues)) {
do {
txq -= dev->real_num_tx_queues;


@ -346,7 +346,7 @@ parse_eeprom (struct net_device *dev)
if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
/* Check CRC */
crc = ~ether_crc_le (256 - 4, sromdata);
if (psrom->crc != crc) {
if (psrom->crc != cpu_to_le32(crc)) {
printk (KERN_ERR "%s: EEPROM data CRC error.\n",
dev->name);
return -1;
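This is the standard cure for comparing an on-media little-endian field against a CPU-order value: convert exactly one side. In isolation (psrom->crc is stored little-endian in the EEPROM):

	u32 crc = ~ether_crc_le(256 - 4, sromdata);	/* CPU byte order */

	if (psrom->crc != cpu_to_le32(crc))		/* compare in LE space */
		return -1;				/* CRC mismatch */

cpu_to_le32() is a no-op on little-endian hosts, which is why the bug only surfaced on big-endian machines.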


@ -10,7 +10,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
* Copyright 2002-2009 Freescale Semiconductor, Inc.
* Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
* Copyright 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
@ -476,9 +476,6 @@ static const struct net_device_ops gfar_netdev_ops = {
#endif
};
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
void lock_rx_qs(struct gfar_private *priv)
{
int i = 0x0;
@ -868,28 +865,28 @@ static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
rqfar--;
rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
ftp_rqfpr[rqfar] = rqfpr;
ftp_rqfcr[rqfar] = rqfcr;
priv->ftp_rqfpr[rqfar] = rqfpr;
priv->ftp_rqfcr[rqfar] = rqfcr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar--;
rqfcr = RQFCR_CMP_NOMATCH;
ftp_rqfpr[rqfar] = rqfpr;
ftp_rqfcr[rqfar] = rqfcr;
priv->ftp_rqfpr[rqfar] = rqfpr;
priv->ftp_rqfcr[rqfar] = rqfcr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar--;
rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
rqfpr = class;
ftp_rqfcr[rqfar] = rqfcr;
ftp_rqfpr[rqfar] = rqfpr;
priv->ftp_rqfcr[rqfar] = rqfcr;
priv->ftp_rqfpr[rqfar] = rqfpr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar--;
rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
rqfpr = class;
ftp_rqfcr[rqfar] = rqfcr;
ftp_rqfpr[rqfar] = rqfpr;
priv->ftp_rqfcr[rqfar] = rqfcr;
priv->ftp_rqfpr[rqfar] = rqfpr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
return rqfar;
@ -904,8 +901,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
/* Default rule */
rqfcr = RQFCR_CMP_MATCH;
ftp_rqfcr[rqfar] = rqfcr;
ftp_rqfpr[rqfar] = rqfpr;
priv->ftp_rqfcr[rqfar] = rqfcr;
priv->ftp_rqfpr[rqfar] = rqfpr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
@ -921,8 +918,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
/* Rest are masked rules */
rqfcr = RQFCR_CMP_NOMATCH;
for (i = 0; i < rqfar; i++) {
ftp_rqfcr[i] = rqfcr;
ftp_rqfpr[i] = rqfpr;
priv->ftp_rqfcr[i] = rqfcr;
priv->ftp_rqfpr[i] = rqfpr;
gfar_write_filer(priv, i, rqfcr, rqfpr);
}
}
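The rewrite above ("localized filer table") is the usual cure for driver-global state: the filer mirrors move from file scope into struct gfar_private (see the gianfar.h hunk that follows), so two eTSEC instances keep independent tables instead of clobbering one shared array. The shape of the change in two lines:

	unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];	/* before: one global table */
	priv->ftp_rqfpr[rqfar] = rqfpr;			/* after: per-device state */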


@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
* Copyright 2002-2009 Freescale Semiconductor, Inc.
* Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -1107,10 +1107,12 @@ struct gfar_private {
/* HW time stamping enabled flag */
int hwts_rx_en;
int hwts_tx_en;
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
};
extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
static inline int gfar_has_errata(struct gfar_private *priv,
enum gfar_errata err)


@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
* Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
* Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
*
* This software may be used and distributed according to
* the terms of the GNU Public License, Version 2, incorporated herein
@ -609,15 +609,15 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L2DA) {
fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
ftp_rqfpr[priv->cur_filer_idx] = fpr;
ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
ftp_rqfpr[priv->cur_filer_idx] = fpr;
ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@ -626,16 +626,16 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
ftp_rqfpr[priv->cur_filer_idx] = fpr;
ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
if (ethflow & RXH_IP_SRC) {
fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
ftp_rqfpr[priv->cur_filer_idx] = fpr;
ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@ -643,8 +643,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & (RXH_IP_DST)) {
fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
ftp_rqfpr[priv->cur_filer_idx] = fpr;
ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@ -652,8 +652,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L3_PROTO) {
fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
ftp_rqfpr[priv->cur_filer_idx] = fpr;
ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@ -661,8 +661,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_0_1) {
fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
ftp_rqfpr[priv->cur_filer_idx] = fpr;
ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@ -670,8 +670,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_2_3) {
fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
ftp_rqfpr[priv->cur_filer_idx] = fpr;
ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@ -705,12 +705,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
}
for (i = 0; i < MAX_FILER_IDX + 1; i++) {
local_rqfpr[j] = ftp_rqfpr[i];
local_rqfcr[j] = ftp_rqfcr[i];
local_rqfpr[j] = priv->ftp_rqfpr[i];
local_rqfcr[j] = priv->ftp_rqfcr[i];
j--;
if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
RQFCR_CLE |RQFCR_AND)) &&
(ftp_rqfpr[i] == cmp_rqfpr))
(priv->ftp_rqfpr[i] == cmp_rqfpr))
break;
}
@ -724,20 +724,22 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
* if it was already programmed, we need to overwrite these rules
*/
for (l = i+1; l < MAX_FILER_IDX; l++) {
if ((ftp_rqfcr[l] & RQFCR_CLE) &&
!(ftp_rqfcr[l] & RQFCR_AND)) {
ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
!(priv->ftp_rqfcr[l] & RQFCR_AND)) {
priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
ftp_rqfpr[l] = FPR_FILER_MASK;
gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
priv->ftp_rqfpr[l] = FPR_FILER_MASK;
gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
priv->ftp_rqfpr[l]);
break;
}
if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
(priv->ftp_rqfcr[l] & RQFCR_AND))
continue;
else {
local_rqfpr[j] = ftp_rqfpr[l];
local_rqfcr[j] = ftp_rqfcr[l];
local_rqfpr[j] = priv->ftp_rqfpr[l];
local_rqfcr[j] = priv->ftp_rqfcr[l];
j--;
}
}
@ -750,8 +752,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
/* Write back the popped out rules again */
for (k = j+1; k < MAX_FILER_IDX; k++) {
ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
gfar_write_filer(priv, priv->cur_filer_idx,
local_rqfcr[k], local_rqfpr[k]);
if (!priv->cur_filer_idx)


@ -2373,6 +2373,9 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
}
#endif /* CONFIG_PCI_IOV */
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
/* i350 cannot do RSS and SR-IOV at the same time */
if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
adapter->rss_queues = 1;
/*
* if rss_queues > 4 or vfs are going to be allocated with rss_queues


@ -1406,6 +1406,7 @@ qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
for (loop = 0; loop < que->no_ops; loop++) {
QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
addr = que->read_addr;
for (i = 0; i < cnt; i++) {
QLCNIC_RD_DUMP_REG(addr, base, &data);
*buffer++ = cpu_to_le32(data);


@ -2159,6 +2159,7 @@ qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
pbuf->skb = NULL;
}
static inline void


@ -2400,8 +2400,10 @@ static const struct of_device_id smc91x_match[] = {
{ .compatible = "smsc,lan91c94", },
{ .compatible = "smsc,lan91c111", },
{},
}
};
MODULE_DEVICE_TABLE(of, smc91x_match);
#else
#define smc91x_match NULL
#endif
static struct dev_pm_ops smc_drv_pm_ops = {
@ -2416,9 +2418,7 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
#ifdef CONFIG_OF
.of_match_table = smc91x_match,
#endif
},
};


@ -72,6 +72,11 @@ static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
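/*
 * Usage sketch: the knob defaults to off, so existing setups keep the
 * conservative full reset; owners of AR2413/AR5413 radios opt in with:
 *
 *	modprobe ath5k fastchanswitch=1
 */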
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
@ -2686,6 +2691,7 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
int ret, ani_mode;
bool fast;
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
@ -2705,7 +2711,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
ath5k_drain_tx_buffs(sc);
if (chan)
sc->curchan = chan;
ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast,
skip_pcu);
if (ret) {
ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);


@ -1124,8 +1124,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Non fatal, can happen eg.
* on mode change */
ret = 0;
} else
} else {
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
"fast chan change successful\n");
return 0;
}
}
/*


@ -1218,10 +1218,10 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
if (priv->switch_rxon.switch_in_progress &&
(priv->switch_rxon.channel != ctx->staging.channel)) {
if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
(priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_rxon.channel));
le16_to_cpu(priv->switch_channel));
iwl_legacy_chswitch_done(priv, false);
}
@ -1237,7 +1237,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
iwl_legacy_print_rx_config_cmd(priv, ctx);
return 0;
goto set_tx_power;
}
/* If we are currently associated and the new config requires
@ -1317,6 +1317,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
iwl4965_init_sensitivity(priv);
set_tx_power:
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
@ -1403,9 +1404,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
return rc;
}
priv->switch_rxon.channel = cmd.channel;
priv->switch_rxon.switch_in_progress = true;
return iwl_legacy_send_cmd_pdu(priv,
REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}


@ -859,12 +859,8 @@ void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (priv->switch_rxon.switch_in_progress) {
if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
ieee80211_chswitch_done(ctx->vif, is_success);
mutex_lock(&priv->mutex);
priv->switch_rxon.switch_in_progress = false;
mutex_unlock(&priv->mutex);
}
}
EXPORT_SYMBOL(iwl_legacy_chswitch_done);
@ -876,19 +872,19 @@ void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
if (priv->switch_rxon.switch_in_progress) {
if (!le32_to_cpu(csa->status) &&
(csa->channel == priv->switch_rxon.channel)) {
rxon->channel = csa->channel;
ctx->staging.channel = csa->channel;
IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
return;
if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
rxon->channel = csa->channel;
ctx->staging.channel = csa->channel;
IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
iwl_legacy_chswitch_done(priv, true);
} else {
IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
le16_to_cpu(csa->channel));
iwl_legacy_chswitch_done(priv, false);
}
iwl_legacy_chswitch_done(priv, true);
} else {
IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
le16_to_cpu(csa->channel));
iwl_legacy_chswitch_done(priv, false);
}
}
EXPORT_SYMBOL(iwl_legacy_rx_csa);


@ -560,7 +560,7 @@ void iwl_legacy_free_geos(struct iwl_priv *priv);
#define STATUS_SCAN_HW 15
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
#define STATUS_CHANNEL_SWITCH_PENDING 18
static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
{


@ -854,17 +854,6 @@ struct traffic_stats {
#endif
};
/*
* iwl_switch_rxon: "channel switch" structure
*
* @ switch_in_progress: channel switch in progress
* @ channel: new channel
*/
struct iwl_switch_rxon {
bool switch_in_progress;
__le16 channel;
};
/*
* schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
* to perform continuous uCode event logging operation if enabled
@ -1115,7 +1104,7 @@ struct iwl_priv {
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
struct iwl_switch_rxon switch_rxon;
__le16 switch_channel;
/* 1st responses from initialize and runtime uCode images.
* _4965's initialize alive response contains some calibration data. */


@ -2861,16 +2861,13 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
goto out;
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
test_bit(STATUS_SCANNING, &priv->status))
test_bit(STATUS_SCANNING, &priv->status) ||
test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
goto out;
if (!iwl_legacy_is_associated_ctx(ctx))
goto out;
/* channel switch in progress */
if (priv->switch_rxon.switch_in_progress == true)
goto out;
if (priv->cfg->ops->lib->set_channel_switch) {
ch = channel->hw_value;
@ -2919,15 +2916,18 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
* at this point, staging_rxon has the
* configuration for channel switch
*/
if (priv->cfg->ops->lib->set_channel_switch(priv,
ch_switch))
priv->switch_rxon.switch_in_progress = false;
set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
priv->switch_channel = cpu_to_le16(ch);
if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
&priv->status);
priv->switch_channel = 0;
ieee80211_chswitch_done(ctx->vif, false);
}
}
}
out:
mutex_unlock(&priv->mutex);
if (!priv->switch_rxon.switch_in_progress)
ieee80211_chswitch_done(ctx->vif, false);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
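The rework trades the mutex-protected switch_in_progress flag for a single atomic status bit; its whole lifecycle is three operations, sketched here with the names introduced above:

	/* request: mark the switch pending before issuing the command */
	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);

	/* failure path: undo immediately and notify mac80211 */
	clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
	ieee80211_chswitch_done(ctx->vif, false);

	/* completion (CSA notification): report exactly once */
	if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		ieee80211_chswitch_done(ctx->vif, is_success);

Because test_and_clear_bit() is atomic, a completion racing with an abort cannot double-report to mac80211.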


@ -177,79 +177,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
return 0;
}
static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch)
{
/*
* MULTI-FIXME
* See iwl_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl6000_channel_switch_cmd cmd;
const struct iwl_channel_info *ch_info;
u32 switch_time_in_usec, ucode_switch_time;
u16 ch;
u32 tsf_low;
u8 switch_count;
u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
struct ieee80211_vif *vif = ctx->vif;
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = { sizeof(cmd), },
.flags = CMD_SYNC,
.data = { &cmd, },
};
cmd.band = priv->band == IEEE80211_BAND_2GHZ;
ch = ch_switch->channel->hw_value;
IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
ctx->active.channel, ch);
cmd.channel = cpu_to_le16(ch);
cmd.rxon_flags = ctx->staging.flags;
cmd.rxon_filter_flags = ctx->staging.filter_flags;
switch_count = ch_switch->count;
tsf_low = ch_switch->timestamp & 0x0ffffffff;
/*
* calculate the ucode channel switch time
* adding TSF as one of the factor for when to switch
*/
if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
beacon_interval)) {
switch_count -= (priv->ucode_beacon_time -
tsf_low) / beacon_interval;
} else
switch_count = 0;
}
if (switch_count <= 1)
cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
else {
switch_time_in_usec =
vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
ucode_switch_time = iwl_usecs_to_beacons(priv,
switch_time_in_usec,
beacon_interval);
cmd.switch_time = iwl_add_beacon_time(priv,
priv->ucode_beacon_time,
ucode_switch_time,
beacon_interval);
}
IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
cmd.switch_time);
ch_info = iwl_get_channel_info(priv, priv->band, ch);
if (ch_info)
cmd.expect_beacon = is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
ctx->active.channel, ch);
return -EFAULT;
}
priv->switch_rxon.channel = cmd.channel;
priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_sync(priv, &hcmd);
}
static struct iwl_lib_ops iwl2000_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
.rx_handler_setup = iwlagn_rx_handler_setup,
@ -258,7 +185,6 @@ static struct iwl_lib_ops iwl2000_lib = {
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl2030_hw_channel_switch,
.apm_ops = {
.init = iwl_apm_init,
.config = iwl2000_nic_config,


@ -331,8 +331,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
ctx->active.channel, ch);
return -EFAULT;
}
priv->switch_rxon.channel = cmd.channel;
priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_sync(priv, &hcmd);
}
@ -425,7 +423,6 @@ static struct iwl_base_params iwl5000_base_params = {
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
};
#define IWL_DEVICE_5000 \


@ -270,8 +270,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
ctx->active.channel, ch);
return -EFAULT;
}
priv->switch_rxon.channel = cmd.channel;
priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_sync(priv, &hcmd);
}


@ -163,17 +163,9 @@ static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
__le16 fc, __le32 *tx_flags)
{
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
info->flags & IEEE80211_TX_CTL_AMPDU)
*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
return;
}
if (priv->cfg->ht_params &&
priv->cfg->ht_params->use_rts_for_aggregation &&
info->flags & IEEE80211_TX_CTL_AMPDU) {
*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
return;
}
}
/* Calc max signal level (dBm) among 3 possible receivers */


@ -325,6 +325,14 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return 0;
}
/*
* force CTS-to-self frames protection if RTS-CTS is not preferred
* one aggregation protection method
*/
if (!(priv->cfg->ht_params &&
priv->cfg->ht_params->use_rts_for_aggregation))
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@ -342,10 +350,10 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
if (priv->switch_rxon.switch_in_progress &&
(priv->switch_rxon.channel != ctx->staging.channel)) {
if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
(priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_rxon.channel));
le16_to_cpu(priv->switch_channel));
iwl_chswitch_done(priv, false);
}
@ -362,6 +370,11 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
}
memcpy(active, &ctx->staging, sizeof(*active));
/*
* We do not commit tx power settings while channel changing,
* do it now if after settings changed.
*/
iwl_set_tx_power(priv, priv->tx_power_next, false);
return 0;
}


@ -2843,16 +2843,13 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
goto out;
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
test_bit(STATUS_SCANNING, &priv->status))
test_bit(STATUS_SCANNING, &priv->status) ||
test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
goto out;
if (!iwl_is_associated_ctx(ctx))
goto out;
/* channel switch in progress */
if (priv->switch_rxon.switch_in_progress == true)
goto out;
if (priv->cfg->ops->lib->set_channel_switch) {
ch = channel->hw_value;
@ -2901,15 +2898,19 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
* at this point, staging_rxon has the
* configuration for channel switch
*/
set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
priv->switch_channel = cpu_to_le16(ch);
if (priv->cfg->ops->lib->set_channel_switch(priv,
ch_switch))
priv->switch_rxon.switch_in_progress = false;
ch_switch)) {
clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
&priv->status);
priv->switch_channel = 0;
ieee80211_chswitch_done(ctx->vif, false);
}
}
}
out:
mutex_unlock(&priv->mutex);
if (!priv->switch_rxon.switch_in_progress)
ieee80211_chswitch_done(ctx->vif, false);
IWL_DEBUG_MAC80211(priv, "leave\n");
}


@ -843,12 +843,8 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (priv->switch_rxon.switch_in_progress) {
if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
ieee80211_chswitch_done(ctx->vif, is_success);
mutex_lock(&priv->mutex);
priv->switch_rxon.switch_in_progress = false;
mutex_unlock(&priv->mutex);
}
}
#ifdef CONFIG_IWLWIFI_DEBUG


@ -560,6 +560,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
#define STATUS_DEVICE_ENABLED 18
#define STATUS_CHANNEL_SWITCH_PENDING 19
static inline int iwl_is_ready(struct iwl_priv *priv)


@ -981,17 +981,6 @@ struct traffic_stats {
#endif
};
/*
* iwl_switch_rxon: "channel switch" structure
*
* @ switch_in_progress: channel switch in progress
* @ channel: new channel
*/
struct iwl_switch_rxon {
bool switch_in_progress;
__le16 channel;
};
/*
* schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
* to perform continuous uCode event logging operation if enabled
@ -1287,7 +1276,7 @@ struct iwl_priv {
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
struct iwl_switch_rxon switch_rxon;
__le16 switch_channel;
struct {
u32 error_event_table;


@ -250,19 +250,19 @@ static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
if (priv->switch_rxon.switch_in_progress) {
if (!le32_to_cpu(csa->status) &&
(csa->channel == priv->switch_rxon.channel)) {
rxon->channel = csa->channel;
ctx->staging.channel = csa->channel;
IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
return;
if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
rxon->channel = csa->channel;
ctx->staging.channel = csa->channel;
IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, true);
} else {
IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, false);
}
iwl_chswitch_done(priv, true);
} else {
IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, false);
}
}


@ -907,7 +907,7 @@ static void if_sdio_interrupt(struct sdio_func *func)
card = sdio_get_drvdata(func);
cause = sdio_readb(card->func, IF_SDIO_H_INT_STATUS, &ret);
if (ret)
if (ret || !cause)
goto out;
lbs_deb_sdio("interrupt: 0x%X\n", (unsigned)cause);
@ -1008,10 +1008,6 @@ static int if_sdio_probe(struct sdio_func *func,
if (ret)
goto release;
ret = sdio_claim_irq(func, if_sdio_interrupt);
if (ret)
goto disable;
/* For 1-bit transfers to the 8686 model, we need to enable the
* interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
* bit to allow access to non-vendor registers. */
@ -1082,6 +1078,21 @@ static int if_sdio_probe(struct sdio_func *func,
else
card->rx_unit = 0;
/*
* Set up the interrupt handler late.
*
* If we set it up earlier, the (buggy) hardware generates a spurious
* interrupt, even before the interrupt has been enabled, with
* CCCR_INTx = 0.
*
* We register the interrupt handler late so that we can handle any
* spurious interrupts, and also to avoid generation of that known
* spurious interrupt in the first place.
*/
ret = sdio_claim_irq(func, if_sdio_interrupt);
if (ret)
goto disable;
/*
* Enable interrupts now that everything is set up
*/


@ -250,7 +250,8 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL)
rt2x00link_reset_tuner(rt2x00dev, false);
if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
(ieee80211_flags & IEEE80211_CONF_CHANGE_PS) &&
(conf->flags & IEEE80211_CONF_PS)) {
beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon;


@ -146,6 +146,9 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
struct rt2x00_dev *rt2x00dev =
container_of(work, struct rt2x00_dev, autowakeup_work.work);
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return;
if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
ERROR(rt2x00dev, "Device failed to wakeup.\n");
clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
@ -1160,6 +1163,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Stop all work.
*/
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
if (rt2x00_is_usb(rt2x00dev)) {
del_timer_sync(&rt2x00dev->txstatus_timer);
cancel_work_sync(&rt2x00dev->rxdone_work);


@ -669,6 +669,19 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
&rx_status,
(u8 *) pdesc, skb);
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (unlikely(!new_skb)) {
RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
DBG_DMESG,
("can't alloc skb for rx\n"));
goto done;
}
pci_unmap_single(rtlpci->pdev,
*((dma_addr_t *) skb->cb),
rtlpci->rxbuffersize,
PCI_DMA_FROMDEVICE);
skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
false,
HW_DESC_RXPKT_LEN));
@ -685,22 +698,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
hdr = rtl_get_hdr(skb);
fc = rtl_get_fc(skb);
/* try for new buffer - if allocation fails, drop
* frame and reuse old buffer
*/
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (unlikely(!new_skb)) {
RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
DBG_DMESG,
("can't alloc skb for rx\n"));
goto done;
}
pci_unmap_single(rtlpci->pdev,
*((dma_addr_t *) skb->cb),
rtlpci->rxbuffersize,
PCI_DMA_FROMDEVICE);
if (!stats.crc || !stats.hwerror) {
if (!stats.crc && !stats.hwerror) {
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
sizeof(rx_status));


@ -539,10 +539,12 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
if (!pc->hostmode)
ssb_pcicore_init_clientmode(pc);
/* Additional always once-executed workarounds */
ssb_pcicore_serdes_workaround(pc);
/* TODO: ASPM */
/* TODO: Clock Request Update */
/* Additional PCIe always once-executed workarounds */
if (dev->id.coreid == SSB_DEV_PCIE) {
ssb_pcicore_serdes_workaround(pc);
/* TODO: ASPM */
/* TODO: Clock Request Update */
}
}
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)


@ -268,7 +268,7 @@ struct ethtool_pauseparam {
__u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
* being true) the user may set 'autonet' here non-zero to have the
* being true) the user may set 'autoneg' here non-zero to have the
* pause parameters be auto-negotiated too. In such a case, the
* {rx,tx}_pause values below determine what capabilities are
* advertised.
@ -811,7 +811,7 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
* @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
* are turned on or off.
* @set_tx_csum: Deprecated in favour of generic netdev features. Turn
* transmit checksums on or off. Returns a egative error code or zero.
* transmit checksums on or off. Returns a negative error code or zero.
* @get_sg: Deprecated as redundant. Report whether scatter-gather is
* enabled.
* @set_sg: Deprecated in favour of generic netdev features. Turn
@ -1087,7 +1087,7 @@ struct ethtool_ops {
/* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the
* devices settings, these indicate the current mode and whether
* it was foced up into this mode or autonegotiated.
* it was forced up into this mode or autonegotiated.
*/
/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */


@ -62,6 +62,7 @@ struct tpacket_auxdata {
__u16 tp_mac;
__u16 tp_net;
__u16 tp_vlan_tci;
__u16 tp_padding;
};
/* Rx ring - header status */
@ -101,6 +102,7 @@ struct tpacket2_hdr {
__u32 tp_sec;
__u32 tp_nsec;
__u16 tp_vlan_tci;
__u16 tp_padding;
};
#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))


@ -225,7 +225,7 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
}
/**
* __vlan_put_tag - regular VLAN tag inserting
* vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_tci: VLAN TCI to insert
*
@ -234,8 +234,10 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
*/
static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
{
struct vlan_ethhdr *veth;
@ -255,8 +257,25 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
skb->protocol = htons(ETH_P_8021Q);
return skb;
}
/**
* __vlan_put_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
skb = vlan_insert_tag(skb, vlan_tci);
if (skb)
skb->protocol = htons(ETH_P_8021Q);
return skb;
}
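Splitting the helper this way lets the receive path insert a tag without disturbing skb->protocol, while transmit keeps the old semantics:

	/* rx (REORDER_HDR off): protocol stays what eth_type_trans() set */
	skb = vlan_insert_tag(skb, skb->vlan_tci);

	/* tx: tag and relabel the frame as 802.1Q, as before */
	skb = __vlan_put_tag(skb, vlan_tci);

Both return NULL on allocation failure with the original skb already freed, per the skb_unshare() convention noted in the kerneldoc.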


@ -2555,7 +2555,7 @@ extern void netdev_class_remove_file(struct class_attribute *class_attr);
extern struct kobj_ns_type_operations net_ns_type_operations;
extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
extern const char *netdev_drivername(const struct net_device *dev);
extern void linkwatch_run_queue(void);


@ -18,6 +18,9 @@ enum ip_conntrack_info {
/* >= this indicates reply direction */
IP_CT_IS_REPLY,
IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY,
IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY,
/* Number of distinct IP_CT types (no NEW in reply dirn). */
IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
};
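/*
 * Numeric view (a note; values follow from the enum order, with the
 * conventional ESTABLISHED/RELATED/NEW = 0/1/2 and IP_CT_IS_REPLY = 3):
 *   IP_CT_ESTABLISHED_REPLY = 3, IP_CT_RELATED_REPLY = 4, IP_CT_NEW_REPLY = 5
 * so the callers converted below change no values, only spelling.
 */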


@ -1256,6 +1256,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
skb->tail += len;
}
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
skb->mac_len = skb->network_header - skb->mac_header;
}
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{


@ -23,6 +23,31 @@ bool vlan_do_receive(struct sk_buff **skbp)
return false;
skb->dev = vlan_dev;
if (skb->pkt_type == PACKET_OTHERHOST) {
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the
* underlying device, and still route correctly. */
if (!compare_ether_addr(eth_hdr(skb)->h_dest,
vlan_dev->dev_addr))
skb->pkt_type = PACKET_HOST;
}
if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
unsigned int offset = skb->data - skb_mac_header(skb);
/*
* vlan_insert_tag expect skb->data pointing to mac header.
* So change skb->data before calling it and change back to
* original position later
*/
skb_push(skb, offset);
skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
if (!skb)
return false;
skb_pull(skb, offset + VLAN_HLEN);
skb_reset_mac_len(skb);
}
skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
skb->vlan_tci = 0;
@ -31,22 +56,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;
switch (skb->pkt_type) {
case PACKET_BROADCAST:
break;
case PACKET_MULTICAST:
if (skb->pkt_type == PACKET_MULTICAST)
rx_stats->rx_multicast++;
break;
case PACKET_OTHERHOST:
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the
* underlying device, and still route correctly. */
if (!compare_ether_addr(eth_hdr(skb)->h_dest,
vlan_dev->dev_addr))
skb->pkt_type = PACKET_HOST;
break;
}
u64_stats_update_end(&rx_stats->syncp);
return true;
@ -89,18 +100,13 @@ gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
}
EXPORT_SYMBOL(vlan_gro_frags);
static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
if (skb_cow(skb, skb_headroom(skb)) < 0)
skb = NULL;
if (skb) {
/* Lifted from Gleb's VLAN code... */
memmove(skb->data - ETH_HLEN,
skb->data - VLAN_ETH_HLEN, 12);
skb->mac_header += VLAN_HLEN;
}
}
if (skb_cow(skb, skb_headroom(skb)) < 0)
return NULL;
memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
skb->mac_header += VLAN_HLEN;
skb_reset_mac_len(skb);
return skb;
}
@ -161,7 +167,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
skb_pull_rcsum(skb, VLAN_HLEN);
vlan_set_encap_proto(skb, vhdr);
skb = vlan_check_reorder_header(skb);
skb = vlan_reorder_header(skb);
if (unlikely(!skb))
goto err_free;


@ -104,10 +104,16 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
{
return NULL;
}
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
.cow_metrics = fake_cow_metrics,
};
/*


@ -3114,7 +3114,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
skb_reset_mac_len(skb);
pt_prev = NULL;
@ -6178,6 +6178,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
oldsd->output_queue = NULL;
oldsd->output_queue_tailp = &oldsd->output_queue;
}
/* Append NAPI poll list from offline CPU. */
if (!list_empty(&oldsd->poll_list)) {
list_splice_init(&oldsd->poll_list, &sd->poll_list);
raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
@ -6264,29 +6269,23 @@ static int __net_init netdev_init(struct net *net)
/**
* netdev_drivername - network driver for the device
* @dev: network device
* @buffer: buffer for resulting name
* @len: size of buffer
*
* Determine network driver for device.
*/
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
const char *netdev_drivername(const struct net_device *dev)
{
const struct device_driver *driver;
const struct device *parent;
if (len <= 0 || !buffer)
return buffer;
buffer[0] = 0;
const char *empty = "";
parent = dev->dev.parent;
if (!parent)
return buffer;
return empty;
driver = parent->driver;
if (driver && driver->name)
strlcpy(buffer, driver->name, len);
return buffer;
return driver->name;
return empty;
}
static int __netdev_printk(const char *level, const struct net_device *dev,


@ -310,19 +310,17 @@ struct net *get_net_ns_by_fd(int fd)
struct file *file;
struct net *net;
net = ERR_PTR(-EINVAL);
file = proc_ns_fget(fd);
if (!file)
goto out;
if (IS_ERR(file))
return ERR_CAST(file);
ei = PROC_I(file->f_dentry->d_inode);
if (ei->ns_ops != &netns_operations)
goto out;
if (ei->ns_ops == &netns_operations)
net = get_net(ei->ns);
else
net = ERR_PTR(-EINVAL);
net = get_net(ei->ns);
out:
if (file)
fput(file);
fput(file);
return net;
}


@ -792,6 +792,13 @@ int netpoll_setup(struct netpoll *np)
return -ENODEV;
}
if (ndev->master) {
printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
np->name, np->dev_name);
err = -EBUSY;
goto put;
}
if (!netif_running(ndev)) {
unsigned long atmost, atleast;


@ -799,7 +799,9 @@ static int __ip_append_data(struct sock *sk,
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
exthdrlen = transhdrlen ? rt->dst.header_len : 0;
skb = skb_peek_tail(queue);
exthdrlen = !skb ? rt->dst.header_len : 0;
length += exthdrlen;
transhdrlen += exthdrlen;
mtu = cork->fragsize;
@ -825,8 +827,6 @@ static int __ip_append_data(struct sock *sk,
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
skb = skb_peek_tail(queue);
cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&


@ -402,7 +402,8 @@ ipq_dev_drop(int ifindex)
static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
int status, type, pid, flags, nlmsglen, skblen;
int status, type, pid, flags;
unsigned int nlmsglen, skblen;
struct nlmsghdr *nlh;
skblen = skb->len;


@ -307,7 +307,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
* error messages (RELATED) and information requests (see below) */
if (ip_hdr(skb)->protocol == IPPROTO_ICMP &&
(ctinfo == IP_CT_RELATED ||
ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY))
ctinfo == IP_CT_RELATED_REPLY))
return XT_CONTINUE;
/* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
@ -321,12 +321,12 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
ct->mark = hash;
break;
case IP_CT_RELATED:
case IP_CT_RELATED+IP_CT_IS_REPLY:
case IP_CT_RELATED_REPLY:
/* FIXME: we don't handle expectations at the
* moment. they can arrive on a different node than
* the master connection (e.g. FTP passive mode) */
case IP_CT_ESTABLISHED:
case IP_CT_ESTABLISHED+IP_CT_IS_REPLY:
case IP_CT_ESTABLISHED_REPLY:
break;
default:
break;


@ -60,7 +60,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
nat = nfct_nat(ct);
NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
ctinfo == IP_CT_RELATED_REPLY));
/* Source address is 0.0.0.0 - locally generated packet that is
* probably not supposed to be masqueraded.


@ -101,7 +101,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
goto out;
help = nfct_help(ct);


@ -160,7 +160,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
/* Update skb to refer to this connection */
skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
skb->nfctinfo = *ctinfo;
return -NF_ACCEPT;
return NF_ACCEPT;
}
/* Small and modified version of icmp_rcv */


@ -433,7 +433,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
/* Must be RELATED */
NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
skb->nfctinfo == IP_CT_RELATED_REPLY);
/* Redirects on non-null nats must be dropped, else they'll
start talking to each other without our translation, and be

View File

@ -160,7 +160,7 @@ static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data
if (skb->ip_summed != CHECKSUM_PARTIAL) {
if (!(rt->rt_flags & RTCF_LOCAL) &&
skb->dev->features & NETIF_F_V4_CSUM) {
(!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_headroom(skb) +
skb_network_offset(skb) +

View File

@ -53,7 +53,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
/* Connection must be valid and new. */
NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
ctinfo == IP_CT_RELATED_REPLY));
NF_CT_ASSERT(par->out != NULL);
return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);

View File

@ -116,7 +116,7 @@ nf_nat_fn(unsigned int hooknum,
switch (ctinfo) {
case IP_CT_RELATED:
case IP_CT_RELATED+IP_CT_IS_REPLY:
case IP_CT_RELATED_REPLY:
if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(ct, ctinfo,
hooknum, skb))
@ -144,7 +144,7 @@ nf_nat_fn(unsigned int hooknum,
default:
/* ESTABLISHED */
NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
ctinfo == IP_CT_ESTABLISHED_REPLY);
}
return nf_nat_packet(ct, ctinfo, hooknum, skb);

View File

@ -1316,6 +1316,23 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
;
}
static bool peer_pmtu_expired(struct inet_peer *peer)
{
unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
return orig &&
time_after_eq(jiffies, orig) &&
cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
}
static bool peer_pmtu_cleaned(struct inet_peer *peer)
{
unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
return orig &&
cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
}
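
peer_pmtu_expired() and peer_pmtu_cleaned() above are a lockless one-shot claim: ACCESS_ONCE() snapshots pmtu_expires, and cmpxchg() guarantees that, of all racing CPUs, exactly one swaps it to zero and performs the MTU restore. A userspace sketch of the same pattern with C11 atomics standing in for the kernel primitives (names are illustrative):

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic unsigned long pmtu_expires;	/* 0 means "nothing to undo" */

	/* Returns true for exactly one caller per armed deadline. */
	static bool claim_pmtu_expiry(void)
	{
		unsigned long orig = atomic_load(&pmtu_expires);	/* ACCESS_ONCE() */

		return orig &&
		       atomic_compare_exchange_strong(&pmtu_expires, &orig, 0UL); /* cmpxchg() */
	}

	int main(void)
	{
		atomic_store(&pmtu_expires, 12345UL);	/* arm a fake deadline */
		return claim_pmtu_expiry() ? 0 : 1;	/* the first claim succeeds */
	}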
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *)dst;
@ -1331,14 +1348,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
rt_genid(dev_net(dst->dev)));
rt_del(hash, rt);
ret = NULL;
} else if (rt->peer &&
rt->peer->pmtu_expires &&
time_after_eq(jiffies, rt->peer->pmtu_expires)) {
unsigned long orig = rt->peer->pmtu_expires;
if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
dst_metric_set(dst, RTAX_MTU,
rt->peer->pmtu_orig);
} else if (rt->peer && peer_pmtu_expired(rt->peer)) {
dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
}
}
return ret;
@ -1531,8 +1542,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
{
unsigned long expires = peer->pmtu_expires;
unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
if (!expires)
return;
if (time_before(jiffies, expires)) {
u32 orig_dst_mtu = dst_mtu(dst);
if (peer->pmtu_learned < orig_dst_mtu) {
@ -1555,10 +1568,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
rt_bind_peer(rt, rt->rt_dst, 1);
peer = rt->peer;
if (peer) {
unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
unsigned long pmtu_expires;
if (!pmtu_expires || mtu < peer->pmtu_learned) {
pmtu_expires = jiffies + ip_rt_mtu_expires;
if (!pmtu_expires)
@ -1612,13 +1626,14 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
rt_bind_peer(rt, rt->rt_dst, 0);
peer = rt->peer;
if (peer && peer->pmtu_expires)
if (peer) {
check_peer_pmtu(dst, peer);
if (peer && peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
if (check_peer_redir(dst, peer))
return NULL;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
if (check_peer_redir(dst, peer))
return NULL;
}
}
rt->rt_peer_genid = rt_peer_genid();
@ -1649,14 +1664,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
rt = skb_rtable(skb);
if (rt &&
rt->peer &&
rt->peer->pmtu_expires) {
unsigned long orig = rt->peer->pmtu_expires;
if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
}
if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
}
static int ip_rt_bug(struct sk_buff *skb)
@ -1770,8 +1779,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
sizeof(u32) * RTAX_MAX);
dst_init_metrics(&rt->dst, peer->metrics, false);
if (peer->pmtu_expires)
check_peer_pmtu(&rt->dst, peer);
check_peer_pmtu(&rt->dst, peer);
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
@ -2775,7 +2783,8 @@ static int rt_fill_info(struct net *net,
struct rtable *rt = skb_rtable(skb);
struct rtmsg *r;
struct nlmsghdr *nlh;
long expires;
long expires = 0;
const struct inet_peer *peer = rt->peer;
u32 id = 0, ts = 0, tsage = 0, error;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
@ -2823,15 +2832,16 @@ static int rt_fill_info(struct net *net,
NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
error = rt->dst.error;
expires = (rt->peer && rt->peer->pmtu_expires) ?
rt->peer->pmtu_expires - jiffies : 0;
if (rt->peer) {
if (peer) {
inet_peer_refcheck(rt->peer);
id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
if (rt->peer->tcp_ts_stamp) {
ts = rt->peer->tcp_ts;
tsage = get_seconds() - rt->peer->tcp_ts_stamp;
id = atomic_read(&peer->ip_id_count) & 0xffff;
if (peer->tcp_ts_stamp) {
ts = peer->tcp_ts;
tsage = get_seconds() - peer->tcp_ts_stamp;
}
expires = ACCESS_ONCE(peer->pmtu_expires);
if (expires)
expires -= jiffies;
}
if (rt_is_input_route(rt)) {

View File

@ -272,6 +272,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (addr->sin6_family != AF_INET6)
return -EINVAL;
addr_type = ipv6_addr_type(&addr->sin6_addr);
if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
return -EINVAL;

View File

@ -403,7 +403,8 @@ ipq_dev_drop(int ifindex)
static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
int status, type, pid, flags, nlmsglen, skblen;
int status, type, pid, flags;
unsigned int nlmsglen, skblen;
struct nlmsghdr *nlh;
skblen = skb->len;

View File

@ -160,7 +160,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
goto out;
help = nfct_help(ct);

View File

@ -177,7 +177,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
/* Update skb to refer to this connection */
skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
skb->nfctinfo = *ctinfo;
return -NF_ACCEPT;
return NF_ACCEPT;
}
static int

View File

@ -87,6 +87,8 @@ static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
iriap_watchdog_timer_expired);
}
static struct lock_class_key irias_objects_key;
/*
* Function iriap_init (void)
*
@ -114,6 +116,9 @@ int __init iriap_init(void)
return -ENOMEM;
}
lockdep_set_class_and_name(&irias_objects->hb_spinlock, &irias_objects_key,
"irias_objects");
/*
* Register some default services for IrLMP
*/
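
Background for the two additions above: dynamically created locks initialized from the same call site (here, every hashbin's spinlock) land in one lockdep class, so legitimate nesting of one hashbin inside another is flagged as a potential deadlock. A static key plus lockdep_set_class_and_name() separates this instance. The general pattern, with a hypothetical object:

	static struct lock_class_key my_obj_key;	/* one static key per distinct class */

	spin_lock_init(&obj->lock);
	lockdep_set_class_and_name(&obj->lock, &my_obj_key, "my_obj_lock");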

View File

@ -258,7 +258,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
*/
pd->net = get_net_ns_by_pid(current->pid);
if (IS_ERR(pd->net)) {
rc = -PTR_ERR(pd->net);
rc = PTR_ERR(pd->net);
goto err_free_pd;
}
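
The sign flip above is the whole bug: PTR_ERR() returns the negative errno that ERR_PTR() packed into the pointer, so negating it hands callers a positive value that no error check recognizes. Tiny illustration:

	void *p = ERR_PTR(-ENOMEM);
	long rc = PTR_ERR(p);	/* rc == -ENOMEM; "-PTR_ERR(p)" would yield +ENOMEM */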

View File

@ -965,6 +965,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
mutex_lock(&sdata->u.ibss.mtx);
sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
sdata->u.ibss.ssid_len = 0;
active_ibss = ieee80211_sta_active_ibss(sdata);
if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@ -999,8 +1003,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
kfree_skb(skb);
skb_queue_purge(&sdata->skb_queue);
memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
sdata->u.ibss.ssid_len = 0;
del_timer_sync(&sdata->u.ibss.timer);

View File

@ -775,9 +775,6 @@ struct ieee80211_local {
int tx_headroom; /* required headroom for hardware/radiotap */
/* count for keys needing tailroom space allocation */
int crypto_tx_tailroom_needed_cnt;
/* Tasklet and skb queue to process calls from IRQ mode. All frames
* added to skb_queue will be processed, but frames in
* skb_queue_unreliable may be dropped if the total length of these

View File

@ -1145,6 +1145,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ IEEE80211_ENCRYPT_HEADROOM;
ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
ret = dev_alloc_name(ndev, ndev->name);
if (ret < 0)
goto fail;
ieee80211_assign_perm_addr(local, ndev, type);
memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));

View File

@ -101,11 +101,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
key->local->crypto_tx_tailroom_needed_cnt--;
return 0;
}
@ -161,10 +156,6 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
key->local->crypto_tx_tailroom_needed_cnt++;
}
void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
@ -403,10 +394,8 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
ieee80211_aes_key_free(key->u.ccmp.tfm);
if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
if (key->local) {
if (key->local)
ieee80211_debugfs_key_remove(key);
key->local->crypto_tx_tailroom_needed_cnt--;
}
kfree(key);
}
@ -468,8 +457,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
ieee80211_debugfs_key_add(key);
key->local->crypto_tx_tailroom_needed_cnt++;
ret = ieee80211_key_enable_hw_accel(key);
mutex_unlock(&sdata->local->key_mtx);
@ -511,12 +498,8 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
mutex_lock(&sdata->local->key_mtx);
sdata->local->crypto_tx_tailroom_needed_cnt = 0;
list_for_each_entry(key, &sdata->key_list, list) {
sdata->local->crypto_tx_tailroom_needed_cnt++;
list_for_each_entry(key, &sdata->key_list, list)
ieee80211_key_enable_hw_accel(key);
}
mutex_unlock(&sdata->local->key_mtx);
}

View File

@ -232,9 +232,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
}
ieee80211_stop_queues_by_reason(&sdata->local->hw,
IEEE80211_QUEUE_STOP_REASON_CSA);
/* channel_type change automatically detected */
ieee80211_hw_config(local, 0);
@ -248,9 +245,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
}
ieee80211_wake_queues_by_reason(&sdata->local->hw,
IEEE80211_QUEUE_STOP_REASON_CSA);
ht_opmode = le16_to_cpu(hti->operation_mode);
/* if bss configuration changed store the new one */

View File

@ -1480,7 +1480,12 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
{
int tail_need = 0;
if (may_encrypt && local->crypto_tx_tailroom_needed_cnt) {
/*
* This could be optimised, devices that do full hardware
* crypto (including TKIP MMIC) need no tailroom... But we
* have no drivers for such devices currently.
*/
if (may_encrypt) {
tail_need = IEEE80211_ENCRYPT_TAILROOM;
tail_need -= skb_tailroom(skb);
tail_need = max_t(int, tail_need, 0);

View File

@ -767,7 +767,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
if (!attr[IPSET_ATTR_SETNAME]) {
for (i = 0; i < ip_set_max; i++) {
if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
ret = IPSET_ERR_BUSY;
ret = -IPSET_ERR_BUSY;
goto out;
}
}

View File

@ -146,8 +146,9 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data =
{ .cidr = h->nets[0].cidr || HOST_MASK };
struct hash_ipportnet4_elem data = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
};
if (data.cidr == 0)
return -EINVAL;
@ -394,8 +395,9 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem data =
{ .cidr = h->nets[0].cidr || HOST_MASK };
struct hash_ipportnet6_elem data = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
};
if (data.cidr == 0)
return -EINVAL;
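
This file and the two that follow repair the same slip in six places: "||" is a boolean OR, so a stored nonzero cidr collapsed to 1 instead of being used as-is, while the intended fallback was the ternary. A compilable demonstration (hypothetical values):

	#include <stdio.h>

	#define HOST_MASK 32	/* fallback prefix length for a host entry */

	int main(void)
	{
		unsigned char stored_cidr = 24;

		printf("buggy : %d\n", stored_cidr || HOST_MASK);		/* 1 */
		printf("fixed : %d\n", stored_cidr ? stored_cidr : HOST_MASK);	/* 24 */
		return 0;
	}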

View File

@ -131,7 +131,9 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem data = { .cidr = h->nets[0].cidr || HOST_MASK };
struct hash_net4_elem data = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
};
if (data.cidr == 0)
return -EINVAL;
@ -296,7 +298,9 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net6_elem data = { .cidr = h->nets[0].cidr || HOST_MASK };
struct hash_net6_elem data = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
};
if (data.cidr == 0)
return -EINVAL;

View File

@ -144,7 +144,8 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem data = {
.cidr = h->nets[0].cidr || HOST_MASK };
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
};
if (data.cidr == 0)
return -EINVAL;
@ -357,7 +358,8 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem data = {
.cidr = h->nets[0].cidr || HOST_MASK };
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
};
if (data.cidr == 0)
return -EINVAL;

View File

@ -1772,7 +1772,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
.priority = 99,
.priority = NF_IP_PRI_NAT_SRC - 2,
},
/* After packet filtering, forward packet through VS/DR, VS/TUN,
* or VS/NAT(change destination), so that filtering rules can be
@ -1782,7 +1782,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
.priority = 101,
.priority = NF_IP_PRI_NAT_SRC - 1,
},
/* Before ip_vs_in, change source only for VS/NAT */
{
@ -1790,7 +1790,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_OUT,
.priority = -99,
.priority = NF_IP_PRI_NAT_DST + 1,
},
/* After mangle, schedule and forward local requests */
{
@ -1798,7 +1798,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_OUT,
.priority = -98,
.priority = NF_IP_PRI_NAT_DST + 2,
},
/* After packet filtering (but before ip_vs_out_icmp), catch icmp
* destined for 0.0.0.0/0, which is for incoming IPVS connections */
@ -1824,7 +1824,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_LOCAL_IN,
.priority = 99,
.priority = NF_IP6_PRI_NAT_SRC - 2,
},
/* After packet filtering, forward packet through VS/DR, VS/TUN,
* or VS/NAT(change destination), so that filtering rules can be
@ -1834,7 +1834,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_LOCAL_IN,
.priority = 101,
.priority = NF_IP6_PRI_NAT_SRC - 1,
},
/* Before ip_vs_in, change source only for VS/NAT */
{
@ -1842,7 +1842,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_OUT,
.priority = -99,
.priority = NF_IP6_PRI_NAT_DST + 1,
},
/* After mangle, schedule and forward local requests */
{
@ -1850,7 +1850,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = -98,
.priority = NF_IP6_PRI_NAT_DST + 2,
},
/* After packet filtering (but before ip_vs_out_icmp), catch icmp
* destined for 0.0.0.0/0, which is for incoming IPVS connections */
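
Replacing the literal 99/101/-99/-98 priorities with expressions anchored to the netfilter NAT priorities documents the intent (IPVS must run immediately around NAT in these hooks) and keeps the relative ordering correct if the base values ever move. The relevant anchors, as defined in the era's <linux/netfilter_ipv4.h>:

	NF_IP_PRI_NAT_DST = -100,	/* destination rewriting */
	NF_IP_PRI_FILTER  =    0,
	NF_IP_PRI_NAT_SRC =  100,	/* source rewriting */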

View File

@ -850,7 +850,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
/* It exists; we have (non-exclusive) reference. */
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
*ctinfo = IP_CT_ESTABLISHED_REPLY;
/* Please set reply bit if this packet OK */
*set_reply = 1;
} else {
@ -922,6 +922,9 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
ret = -ret;
goto out;
}
/* ICMP[v6] protocol trackers may assign one conntrack. */
if (skb->nfct)
goto out;
}
ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
@ -1143,7 +1146,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
/* This ICMP is in reverse direction to the packet which caused it */
ct = nf_ct_get(skb, &ctinfo);
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
ctinfo = IP_CT_RELATED_REPLY;
else
ctinfo = IP_CT_RELATED;

View File

@ -368,7 +368,7 @@ static int help(struct sk_buff *skb,
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
ctinfo != IP_CT_ESTABLISHED_REPLY) {
pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
return NF_ACCEPT;
}

View File

@ -571,10 +571,9 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
int ret;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
}
pr_debug("nf_ct_h245: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);
@ -1125,10 +1124,9 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
int ret;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
}
pr_debug("nf_ct_q931: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);

View File

@ -125,8 +125,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
return NF_ACCEPT;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* Not a full tcp header? */

View File

@ -519,8 +519,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
u_int16_t msg;
/* don't do any tracking before tcp handshake complete */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
nexthdr_off = protoff;

View File

@ -78,7 +78,7 @@ static int help(struct sk_buff *skb,
ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY)
ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* Not a full tcp header? */

View File

@ -1423,7 +1423,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
/* No Data ? */

View File

@ -143,9 +143,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
ct = nf_ct_get(skb, &ctinfo);
if (ct && !nf_ct_is_untracked(ct) &&
((iph->protocol != IPPROTO_ICMP &&
ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) ||
ctinfo == IP_CT_ESTABLISHED_REPLY) ||
(iph->protocol == IPPROTO_ICMP &&
ctinfo == IP_CT_IS_REPLY + IP_CT_RELATED)) &&
ctinfo == IP_CT_RELATED_REPLY)) &&
(ct->status & IPS_SRC_NAT_DONE)) {
daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;

View File

@ -804,6 +804,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
} else {
h.h2->tp_vlan_tci = 0;
}
h.h2->tp_padding = 0;
hdrlen = sizeof(*h.h2);
break;
default:
@ -1736,6 +1737,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
} else {
aux.tp_vlan_tci = 0;
}
aux.tp_padding = 0;
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
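
Both additions above close a small infoleak: the tpacket v2 ring frame and the PACKET_AUXDATA cmsg are copied to userspace in full, and tp_padding was never written, so it carried whatever kernel bytes happened to sit there. The defensive rule, sketched with a hypothetical struct:

	struct aux_out {
		unsigned short tp_vlan_tci;
		unsigned short tp_padding;	/* alignment hole: must be zeroed */
	};

	struct aux_out a;
	a.tp_vlan_tci = tci;
	a.tp_padding = 0;	/* or memset(&a, 0, sizeof(a)) up front */
	/* ...then the whole struct is copied out, e.g. via put_cmsg() */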

View File

@ -251,9 +251,8 @@ static void dev_watchdog(unsigned long arg)
}
if (some_queue_timedout) {
char drivername[64];
WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
dev->name, netdev_drivername(dev, drivername, 64), i);
dev->name, netdev_drivername(dev), i);
dev->netdev_ops->ndo_tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer,

View File

@ -3406,11 +3406,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
request->ssids[i].ssid_len = nla_len(attr);
if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
}
request->ssids[i].ssid_len = nla_len(attr);
memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
i++;
}
@ -3572,12 +3572,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
tmp) {
request->ssids[i].ssid_len = nla_len(attr);
if (request->ssids[i].ssid_len >
IEEE80211_MAX_SSID_LEN) {
if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
}
request->ssids[i].ssid_len = nla_len(attr);
memcpy(request->ssids[i].ssid, nla_data(attr),
nla_len(attr));
i++;
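
Both nl80211 hunks apply validate-before-store: the attribute length is checked against IEEE80211_MAX_SSID_LEN before being written into request->ssids[i], so nothing ever observes an unvalidated ssid_len ahead of the memcpy(). The pattern in miniature (hypothetical names):

	if (nla_len(attr) > sizeof(dst->ssid))	/* reject first... */
		return -EINVAL;
	dst->ssid_len = nla_len(attr);		/* ...record only a vetted length */
	memcpy(dst->ssid, nla_data(attr), dst->ssid_len);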

View File

@ -265,7 +265,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] |= (1U << bitnr);
} else {
nr = replay_esn->replay_window >> 5;
nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
@ -471,7 +471,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] |= (1U << bitnr);
} else {
nr = replay_esn->replay_window >> 5;
nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
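
The subtraction matters when the window size is an exact multiple of 32: a replay window of N bits occupies ((N - 1) >> 5) + 1 u32 words, so the last valid word index is (N - 1) >> 5, and the old bound cleared one word past the bitmap. Standalone check:

	#include <stdio.h>

	int main(void)
	{
		unsigned int window = 128;			/* bits, multiple of 32 */
		unsigned int words = (window + 31) >> 5;	/* 4 words allocated */

		printf("words allocated: %u\n", words);
		printf("buggy top index: %u (== words, out of range)\n", window >> 5);
		printf("fixed top index: %u (last valid word)\n", (window - 1) >> 5);
		return 0;
	}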