Merge branch 'master' into for-linus

Rafael J. Wysocki 2009-08-18 23:37:37 +02:00
commit af15c1addf
149 changed files with 1893 additions and 870 deletions

View File

@ -904,7 +904,7 @@ F: drivers/input/misc/ati_remote2.c
ATLX ETHERNET DRIVERS
M: Jay Cliburn <jcliburn@gmail.com>
M: Chris Snook <csnook@redhat.com>
M: Chris Snook <chris.snook@gmail.com>
M: Jie Yang <jie.yang@atheros.com>
L: atl1-devel@lists.sourceforge.net
W: http://sourceforge.net/projects/atl1
@ -3563,6 +3563,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
S: Maintained
F: net/
F: include/net/
F: include/linux/in.h
F: include/linux/net.h
F: include/linux/netdevice.h
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
@ -3598,6 +3601,8 @@ W: http://www.linuxfoundation.org/en/Net
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
S: Odd Fixes
F: drivers/net/
F: include/linux/if_*
F: include/linux/*device.h
NETXEN (1/10) GbE SUPPORT
M: Dhananjay Phadke <dhananjay@netxen.com>

View File

@ -282,7 +282,7 @@ CONFIG_ALIGNMENT_TRAP=y
#
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="init=/sbin/preinit ubi.mtd=rootfs root=ubi0:rootfs rootfstype=ubifs rootflags=bulk_read,no_chk_data_crc rw console=ttyMTD,log console=tty0"
CONFIG_CMDLINE="init=/sbin/preinit ubi.mtd=rootfs root=ubi0:rootfs rootfstype=ubifs rootflags=bulk_read,no_chk_data_crc rw console=ttyMTD,log console=tty0 console=ttyS2,115200n8"
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
@ -1354,7 +1354,7 @@ CONFIG_USB_OTG_UTILS=y
# CONFIG_USB_GPIO_VBUS is not set
# CONFIG_ISP1301_OMAP is not set
CONFIG_TWL4030_USB=y
CONFIG_MMC=m
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
@ -1449,7 +1449,8 @@ CONFIG_RTC_DRV_TWL4030=m
# on-CPU RTC drivers
#
# CONFIG_DMADEVICES is not set
# CONFIG_REGULATOR is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_TWL4030=y
# CONFIG_UIO is not set
# CONFIG_STAGING is not set

View File

@ -201,7 +201,8 @@ static struct tagtable __tagtable_##fn __tag = { tag, fn }
struct membank {
unsigned long start;
unsigned long size;
int node;
unsigned short node;
unsigned short highmem;
};
struct meminfo {

View File

@ -17,7 +17,7 @@
#include <mach/hardware.h>
#define IO_SPACE_LIMIT 0xffff0000
#define IO_SPACE_LIMIT 0x0000ffff
extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);

View File

@ -63,7 +63,7 @@ static struct imxuart_platform_data uart_pdata = {
static int devboard_sdhc2_get_ro(struct device *dev)
{
return gpio_get_value(SDHC2_WP);
return !gpio_get_value(SDHC2_WP);
}
static int devboard_sdhc2_init(struct device *dev, irq_handler_t detect_irq,

View File

@ -67,7 +67,7 @@ static unsigned int marxbot_pins[] = {
static int marxbot_sdhc2_get_ro(struct device *dev)
{
return gpio_get_value(SDHC2_WP);
return !gpio_get_value(SDHC2_WP);
}
static int marxbot_sdhc2_init(struct device *dev, irq_handler_t detect_irq,

View File

@ -94,7 +94,7 @@ static struct imxi2c_platform_data moboard_i2c1_pdata = {
static int moboard_sdhc1_get_ro(struct device *dev)
{
return gpio_get_value(SDHC1_WP);
return !gpio_get_value(SDHC1_WP);
}
static int moboard_sdhc1_init(struct device *dev, irq_handler_t detect_irq,

View File

@ -24,15 +24,6 @@
#include "devices.h"
static unsigned int pcm037_eet_pins[] = {
/* SPI #1 */
MX31_PIN_CSPI1_MISO__MISO,
MX31_PIN_CSPI1_MOSI__MOSI,
MX31_PIN_CSPI1_SCLK__SCLK,
MX31_PIN_CSPI1_SPI_RDY__SPI_RDY,
MX31_PIN_CSPI1_SS0__SS0,
MX31_PIN_CSPI1_SS1__SS1,
MX31_PIN_CSPI1_SS2__SS2,
/* Reserve and hardwire GPIO 57 high - S6E63D6 chipselect */
IOMUX_MODE(MX31_PIN_KEY_COL7, IOMUX_CONFIG_GPIO),
/* GPIO keys */

View File

@ -141,7 +141,7 @@ static inline void board_smc91x_init(void)
static void __init omap_2430sdp_init_irq(void)
{
omap2_init_common_hw(NULL);
omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
}

View File

@ -169,7 +169,7 @@ static struct platform_device *sdp3430_devices[] __initdata = {
static void __init omap_3430sdp_init_irq(void)
{
omap2_init_common_hw(hyb18m512160af6_sdrc_params);
omap2_init_common_hw(hyb18m512160af6_sdrc_params, NULL);
omap_init_irq();
omap_gpio_init();
}

View File

@ -59,7 +59,7 @@ static void __init gic_init_irq(void)
static void __init omap_4430sdp_init_irq(void)
{
omap2_init_common_hw(NULL);
omap2_init_common_hw(NULL, NULL);
#ifdef CONFIG_OMAP_32K_TIMER
omap2_gp_clockevent_set_gptimer(1);
#endif

View File

@ -250,7 +250,7 @@ static inline void __init apollon_init_smc91x(void)
static void __init omap_apollon_init_irq(void)
{
omap2_init_common_hw(NULL);
omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
apollon_init_smc91x();

View File

@ -33,7 +33,7 @@
static void __init omap_generic_init_irq(void)
{
omap2_init_common_hw(NULL);
omap2_init_common_hw(NULL, NULL);
omap_init_irq();
}

View File

@ -270,7 +270,7 @@ static void __init h4_init_flash(void)
static void __init omap_h4_init_irq(void)
{
omap2_init_common_hw(NULL);
omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
h4_init_flash();

View File

@ -270,7 +270,7 @@ static inline void __init ldp_init_smsc911x(void)
static void __init omap_ldp_init_irq(void)
{
omap2_init_common_hw(NULL);
omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
ldp_init_smsc911x();

View File

@ -282,7 +282,8 @@ static int __init omap3_beagle_i2c_init(void)
static void __init omap3_beagle_init_irq(void)
{
omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
omap_init_irq();
#ifdef CONFIG_OMAP_32K_TIMER
omap2_gp_clockevent_set_gptimer(12);
@ -408,6 +409,10 @@ static void __init omap3_beagle_init(void)
usb_musb_init();
omap3beagle_flash_init();
/* Ensure SDRC pins are mux'd for self-refresh */
omap_cfg_reg(H16_34XX_SDRC_CKE0);
omap_cfg_reg(H17_34XX_SDRC_CKE1);
}
static void __init omap3_beagle_map_io(void)

View File

@ -280,7 +280,7 @@ struct spi_board_info omap3evm_spi_board_info[] = {
static void __init omap3_evm_init_irq(void)
{
omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
omap2_init_common_hw(mt46h32m32lf6_sdrc_params, NULL);
omap_init_irq();
omap_gpio_init();
omap3evm_init_smc911x();

View File

@ -40,6 +40,7 @@
#include <mach/mcspi.h>
#include <mach/usb.h>
#include <mach/keypad.h>
#include <mach/mux.h>
#include "sdram-micron-mt46h32m32lf-6.h"
#include "mmc-twl4030.h"
@ -310,7 +311,8 @@ static int __init omap3pandora_i2c_init(void)
static void __init omap3pandora_init_irq(void)
{
omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
omap_init_irq();
omap_gpio_init();
}
@ -397,6 +399,10 @@ static void __init omap3pandora_init(void)
omap3pandora_ads7846_init();
pandora_keys_gpio_init();
usb_musb_init();
/* Ensure SDRC pins are mux'd for self-refresh */
omap_cfg_reg(H16_34XX_SDRC_CKE0);
omap_cfg_reg(H17_34XX_SDRC_CKE1);
}
static void __init omap3pandora_map_io(void)

View File

@ -44,6 +44,7 @@
#include <mach/gpmc.h>
#include <mach/hardware.h>
#include <mach/nand.h>
#include <mach/mux.h>
#include <mach/usb.h>
#include "sdram-micron-mt46h32m32lf-6.h"
@ -51,6 +52,7 @@
#define OVERO_GPIO_BT_XGATE 15
#define OVERO_GPIO_W2W_NRESET 16
#define OVERO_GPIO_PENDOWN 114
#define OVERO_GPIO_BT_NRESET 164
#define OVERO_GPIO_USBH_CPEN 168
#define OVERO_GPIO_USBH_NRESET 183
@ -146,7 +148,7 @@ static struct platform_device overo_smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(overo_smsc911x_resources),
.resource = &overo_smsc911x_resources,
.resource = overo_smsc911x_resources,
.dev = {
.platform_data = &overo_smsc911x_config,
},
@ -360,7 +362,8 @@ static int __init overo_i2c_init(void)
static void __init overo_init_irq(void)
{
omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
omap_init_irq();
omap_gpio_init();
}
@ -395,6 +398,10 @@ static void __init overo_init(void)
overo_ads7846_init();
overo_init_smsc911x();
/* Ensure SDRC pins are mux'd for self-refresh */
omap_cfg_reg(H16_34XX_SDRC_CKE0);
omap_cfg_reg(H17_34XX_SDRC_CKE1);
if ((gpio_request(OVERO_GPIO_W2W_NRESET,
"OVERO_GPIO_W2W_NRESET") == 0) &&
(gpio_direction_output(OVERO_GPIO_W2W_NRESET, 1) == 0)) {

View File

@ -278,6 +278,10 @@ static struct twl4030_gpio_platform_data rx51_gpio_data = {
.setup = rx51_twlgpio_setup,
};
static struct twl4030_usb_data rx51_usb_data = {
.usb_mode = T2_USB_MODE_ULPI,
};
static struct twl4030_platform_data rx51_twldata = {
.irq_base = TWL4030_IRQ_BASE,
.irq_end = TWL4030_IRQ_END,
@ -286,6 +290,7 @@ static struct twl4030_platform_data rx51_twldata = {
.gpio = &rx51_gpio_data,
.keypad = &rx51_kp_data,
.madc = &rx51_madc_data,
.usb = &rx51_usb_data,
.vaux1 = &rx51_vaux1,
.vaux2 = &rx51_vaux2,

View File

@ -61,7 +61,7 @@ static struct omap_board_config_kernel rx51_config[] = {
static void __init rx51_init_irq(void)
{
omap2_init_common_hw(NULL);
omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
}
@ -75,6 +75,10 @@ static void __init rx51_init(void)
omap_serial_init();
usb_musb_init();
rx51_peripherals_init();
/* Ensure SDRC pins are mux'd for self-refresh */
omap_cfg_reg(H16_34XX_SDRC_CKE0);
omap_cfg_reg(H17_34XX_SDRC_CKE1);
}
static void __init rx51_map_io(void)

View File

@ -25,7 +25,7 @@
static void __init omap_zoom2_init_irq(void)
{
omap2_init_common_hw(NULL);
omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
}

View File

@ -27,6 +27,7 @@
#include <mach/clock.h>
#include <mach/clockdomain.h>
#include <mach/cpu.h>
#include <mach/prcm.h>
#include <asm/div64.h>
#include <mach/sdrc.h>
@ -38,8 +39,6 @@
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
#define MAX_CLOCK_ENABLE_WAIT 100000
/* DPLL rate rounding: minimum DPLL multiplier, divider values */
#define DPLL_MIN_MULTIPLIER 1
#define DPLL_MIN_DIVIDER 1
@ -274,83 +273,97 @@ unsigned long omap2_fixed_divisor_recalc(struct clk *clk)
}
/**
* omap2_wait_clock_ready - wait for clock to enable
* @reg: physical address of clock IDLEST register
* @mask: value to mask against to determine if the clock is active
* @name: name of the clock (for printk)
* omap2_clk_dflt_find_companion - find companion clock to @clk
* @clk: struct clk * to find the companion clock of
* @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
* @other_bit: u8 ** to return the companion clock bit shift in
*
* Returns 1 if the clock enabled in time, or 0 if it failed to enable
* in roughly MAX_CLOCK_ENABLE_WAIT microseconds.
*/
int omap2_wait_clock_ready(void __iomem *reg, u32 mask, const char *name)
{
int i = 0;
int ena = 0;
/*
* 24xx uses 0 to indicate not ready, and 1 to indicate ready.
* 34xx reverses this, just to keep us on our toes
*/
if (cpu_mask & (RATE_IN_242X | RATE_IN_243X))
ena = mask;
else if (cpu_mask & RATE_IN_343X)
ena = 0;
/* Wait for lock */
while (((__raw_readl(reg) & mask) != ena) &&
(i++ < MAX_CLOCK_ENABLE_WAIT)) {
udelay(1);
}
if (i <= MAX_CLOCK_ENABLE_WAIT)
pr_debug("Clock %s stable after %d loops\n", name, i);
else
printk(KERN_ERR "Clock %s didn't enable in %d tries\n",
name, MAX_CLOCK_ENABLE_WAIT);
return (i < MAX_CLOCK_ENABLE_WAIT) ? 1 : 0;
};
/*
* Note: We don't need special code here for INVERT_ENABLE
* for the time being since INVERT_ENABLE only applies to clocks enabled by
* Note: We don't need special code here for INVERT_ENABLE for the
* time being since INVERT_ENABLE only applies to clocks enabled by
* CM_CLKEN_PLL
*
* Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes it's
* just a matter of XORing the bits.
*
* Some clocks don't have companion clocks. For example, modules with
* only an interface clock (such as MAILBOXES) don't have a companion
* clock. Right now, this code relies on the hardware exporting a bit
* in the correct companion register that indicates that the
* nonexistent 'companion clock' is active. Future patches will
* associate this type of code with per-module data structures to
* avoid this issue, and remove the casts. No return value.
*/
static void omap2_clk_wait_ready(struct clk *clk)
void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
u8 *other_bit)
{
void __iomem *reg, *other_reg, *st_reg;
u32 bit;
/*
* REVISIT: This code is pretty ugly. It would be nice to generalize
* it and pull it into struct clk itself somehow.
*/
reg = clk->enable_reg;
u32 r;
/*
* Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes
* it's just a matter of XORing the bits.
*/
other_reg = (void __iomem *)((u32)reg ^ (CM_FCLKEN ^ CM_ICLKEN));
r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
/* Check if both functional and interface clocks
* are running. */
bit = 1 << clk->enable_bit;
if (!(__raw_readl(other_reg) & bit))
return;
st_reg = (void __iomem *)(((u32)other_reg & ~0xf0) | 0x20); /* CM_IDLEST* */
omap2_wait_clock_ready(st_reg, bit, clk->name);
*other_reg = (__force void __iomem *)r;
*other_bit = clk->enable_bit;
}
static int omap2_dflt_clk_enable(struct clk *clk)
/**
* omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
* @clk: struct clk * to find IDLEST info for
* @idlest_reg: void __iomem ** to return the CM_IDLEST va in
* @idlest_bit: u8 ** to return the CM_IDLEST bit shift in
*
* Return the CM_IDLEST register address and bit shift corresponding
* to the module that "owns" this clock. This default code assumes
* that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
* the IDLEST register address ID corresponds to the CM_*CLKEN
* register address ID (e.g., that CM_FCLKEN2 corresponds to
* CM_IDLEST2). This is not true for all modules. No return value.
*/
void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
u8 *idlest_bit)
{
u32 r;
r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
*idlest_reg = (__force void __iomem *)r;
*idlest_bit = clk->enable_bit;
}
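For illustration only, a minimal standalone sketch (not part of this commit) of the address arithmetic these two default hooks rely on. The CM register offsets (CM_FCLKEN = 0x00, CM_ICLKEN = 0x10, IDLEST group at 0x20) are assumed here for the example; only the XOR and mask expressions are taken from the code above.

	/* sketch: companion and IDLEST address derivation on plain integers */
	#include <stdio.h>

	#define CM_FCLKEN	0x00	/* assumed offsets, illustration only */
	#define CM_ICLKEN	0x10

	int main(void)
	{
		unsigned int fclken1 = 0x00;	/* CM_FCLKEN1 */
		unsigned int iclken2 = 0x14;	/* CM_ICLKEN2 */

		/* companion register: CM_FCLKEN* <-> CM_ICLKEN* is a single XOR */
		printf("companion of 0x%02x -> 0x%02x\n",
		       fclken1, fclken1 ^ (CM_FCLKEN ^ CM_ICLKEN));	/* 0x00 -> 0x10 */

		/* IDLEST register: keep the register ID (low nibble), force the 0x20 group */
		printf("idlest for  0x%02x -> 0x%02x\n",
		       iclken2, (iclken2 & ~0xf0) | 0x20);		/* 0x14 -> 0x24 */

		return 0;
	}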
/**
* omap2_module_wait_ready - wait for an OMAP module to leave IDLE
* @clk: struct clk * belonging to the module
*
* If the necessary clocks for the OMAP hardware IP block that
* corresponds to clock @clk are enabled, then wait for the module to
* indicate readiness (i.e., to leave IDLE). This code does not
* belong in the clock code and will be moved in the medium term to
* module-dependent code. No return value.
*/
static void omap2_module_wait_ready(struct clk *clk)
{
void __iomem *companion_reg, *idlest_reg;
u8 other_bit, idlest_bit;
/* Not all modules have multiple clocks that their IDLEST depends on */
if (clk->ops->find_companion) {
clk->ops->find_companion(clk, &companion_reg, &other_bit);
if (!(__raw_readl(companion_reg) & (1 << other_bit)))
return;
}
clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit);
omap2_cm_wait_idlest(idlest_reg, (1 << idlest_bit), clk->name);
}
int omap2_dflt_clk_enable(struct clk *clk)
{
u32 v;
if (unlikely(clk->enable_reg == NULL)) {
printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
pr_err("clock.c: Enable for %s without enable code\n",
clk->name);
return 0; /* REVISIT: -EINVAL */
}
@ -363,26 +376,13 @@ static int omap2_dflt_clk_enable(struct clk *clk)
__raw_writel(v, clk->enable_reg);
v = __raw_readl(clk->enable_reg); /* OCP barrier */
if (clk->ops->find_idlest)
omap2_module_wait_ready(clk);
return 0;
}
static int omap2_dflt_clk_enable_wait(struct clk *clk)
{
int ret;
if (!clk->enable_reg) {
printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
clk->name);
return 0; /* REVISIT: -EINVAL */
}
ret = omap2_dflt_clk_enable(clk);
if (ret == 0)
omap2_clk_wait_ready(clk);
return ret;
}
static void omap2_dflt_clk_disable(struct clk *clk)
void omap2_dflt_clk_disable(struct clk *clk)
{
u32 v;
@ -406,8 +406,10 @@ static void omap2_dflt_clk_disable(struct clk *clk)
}
const struct clkops clkops_omap2_dflt_wait = {
.enable = omap2_dflt_clk_enable_wait,
.enable = omap2_dflt_clk_enable,
.disable = omap2_dflt_clk_disable,
.find_companion = omap2_clk_dflt_find_companion,
.find_idlest = omap2_clk_dflt_find_idlest,
};
const struct clkops clkops_omap2_dflt = {

View File

@ -65,6 +65,12 @@ int omap2_clksel_set_rate(struct clk *clk, unsigned long rate);
u32 omap2_get_dpll_rate(struct clk *clk);
int omap2_wait_clock_ready(void __iomem *reg, u32 cval, const char *name);
void omap2_clk_prepare_for_reboot(void);
int omap2_dflt_clk_enable(struct clk *clk);
void omap2_dflt_clk_disable(struct clk *clk);
void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
u8 *other_bit);
void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
u8 *idlest_bit);
extern const struct clkops clkops_omap2_dflt_wait;
extern const struct clkops clkops_omap2_dflt;

View File

@ -30,6 +30,7 @@
#include <mach/clock.h>
#include <mach/sram.h>
#include <mach/prcm.h>
#include <asm/div64.h>
#include <asm/clkdev.h>
@ -43,6 +44,18 @@
static const struct clkops clkops_oscck;
static const struct clkops clkops_fixed;
static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit);
/* 2430 I2CHS has non-standard IDLEST register */
static const struct clkops clkops_omap2430_i2chs_wait = {
.enable = omap2_dflt_clk_enable,
.disable = omap2_dflt_clk_disable,
.find_idlest = omap2430_clk_i2chs_find_idlest,
.find_companion = omap2_clk_dflt_find_companion,
};
#include "clock24xx.h"
struct omap_clk {
@ -239,6 +252,26 @@ static void __iomem *prcm_clksrc_ctrl;
* Omap24xx specific clock functions
*-------------------------------------------------------------------------*/
/**
* omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
*
* OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the
* CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function
* passes back the correct CM_IDLEST register address for I2CHS
* modules. No return value.
*/
static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit)
{
*idlest_reg = OMAP_CM_REGADDR(CORE_MOD, CM_IDLEST);
*idlest_bit = clk->enable_bit;
}
/**
* omap2xxx_clk_get_core_rate - return the CORE_CLK rate
* @clk: pointer to the combined dpll_ck + core_ck (currently "dpll_ck")
@ -325,7 +358,7 @@ static int omap2_clk_fixed_enable(struct clk *clk)
else if (clk == &apll54_ck)
cval = OMAP24XX_ST_54M_APLL;
omap2_wait_clock_ready(OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), cval,
omap2_cm_wait_idlest(OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), cval,
clk->name);
/*

View File

@ -2337,7 +2337,7 @@ static struct clk i2c2_fck = {
static struct clk i2chs2_fck = {
.name = "i2c_fck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2430_i2chs_wait,
.id = 2,
.parent = &func_96m_ck,
.clkdm_name = "core_l4_clkdm",
@ -2370,7 +2370,7 @@ static struct clk i2c1_fck = {
static struct clk i2chs1_fck = {
.name = "i2c_fck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2430_i2chs_wait,
.id = 1,
.parent = &func_96m_ck,
.clkdm_name = "core_l4_clkdm",

View File

@ -2,7 +2,7 @@
* OMAP3-specific clock framework functions
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
* Copyright (C) 2007-2008 Nokia Corporation
* Copyright (C) 2007-2009 Nokia Corporation
*
* Written by Paul Walmsley
* Testing and integration fixes by Jouni Högander
@ -41,6 +41,37 @@
static const struct clkops clkops_noncore_dpll_ops;
static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit);
static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit);
static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit);
static const struct clkops clkops_omap3430es2_ssi_wait = {
.enable = omap2_dflt_clk_enable,
.disable = omap2_dflt_clk_disable,
.find_idlest = omap3430es2_clk_ssi_find_idlest,
.find_companion = omap2_clk_dflt_find_companion,
};
static const struct clkops clkops_omap3430es2_hsotgusb_wait = {
.enable = omap2_dflt_clk_enable,
.disable = omap2_dflt_clk_disable,
.find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
.find_companion = omap2_clk_dflt_find_companion,
};
static const struct clkops clkops_omap3430es2_dss_usbhost_wait = {
.enable = omap2_dflt_clk_enable,
.disable = omap2_dflt_clk_disable,
.find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
.find_companion = omap2_clk_dflt_find_companion,
};
#include "clock34xx.h"
struct omap_clk {
@ -157,10 +188,13 @@ static struct omap_clk omap34xx_clks[] = {
CLK(NULL, "fshostusb_fck", &fshostusb_fck, CK_3430ES1),
CLK(NULL, "core_12m_fck", &core_12m_fck, CK_343X),
CLK("omap_hdq.0", "fck", &hdq_fck, CK_343X),
CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck, CK_343X),
CLK(NULL, "ssi_sst_fck", &ssi_sst_fck, CK_343X),
CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1, CK_3430ES1),
CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2, CK_3430ES2),
CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1, CK_3430ES1),
CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2, CK_3430ES2),
CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X),
CLK("musb_hdrc", "ick", &hsotgusb_ick, CK_343X),
CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es1, CK_3430ES1),
CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es2, CK_3430ES2),
CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X),
CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X),
CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X),
@ -193,18 +227,21 @@ static struct omap_clk omap34xx_clks[] = {
CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_343X),
CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_343X),
CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_343X),
CLK(NULL, "ssi_ick", &ssi_ick, CK_343X),
CLK(NULL, "ssi_ick", &ssi_ick_3430es1, CK_3430ES1),
CLK(NULL, "ssi_ick", &ssi_ick_3430es2, CK_3430ES2),
CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_3430ES1),
CLK(NULL, "security_l4_ick2", &security_l4_ick2, CK_343X),
CLK(NULL, "aes1_ick", &aes1_ick, CK_343X),
CLK("omap_rng", "ick", &rng_ick, CK_343X),
CLK(NULL, "sha11_ick", &sha11_ick, CK_343X),
CLK(NULL, "des1_ick", &des1_ick, CK_343X),
CLK("omapfb", "dss1_fck", &dss1_alwon_fck, CK_343X),
CLK("omapfb", "dss1_fck", &dss1_alwon_fck_3430es1, CK_3430ES1),
CLK("omapfb", "dss1_fck", &dss1_alwon_fck_3430es2, CK_3430ES2),
CLK("omapfb", "tv_fck", &dss_tv_fck, CK_343X),
CLK("omapfb", "video_fck", &dss_96m_fck, CK_343X),
CLK("omapfb", "dss2_fck", &dss2_alwon_fck, CK_343X),
CLK("omapfb", "ick", &dss_ick, CK_343X),
CLK("omapfb", "ick", &dss_ick_3430es1, CK_3430ES1),
CLK("omapfb", "ick", &dss_ick_3430es2, CK_3430ES2),
CLK(NULL, "cam_mclk", &cam_mclk, CK_343X),
CLK(NULL, "cam_ick", &cam_ick, CK_343X),
CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X),
@ -300,6 +337,73 @@ static struct omap_clk omap34xx_clks[] = {
*/
#define SDRC_MPURATE_LOOPS 96
/**
* omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
*
* The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
* from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit)
{
u32 r;
r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
*idlest_reg = (__force void __iomem *)r;
*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
}
/**
* omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
*
* Some OMAP modules on OMAP3 ES2+ chips have both initiator and
* target IDLEST bits. For our purposes, we are concerned with the
* target IDLEST bits, which exist at a different bit position than
* the *CLKEN bit position for these modules (DSS and USBHOST) (The
* default find_idlest code assumes that they are at the same
* position.) No return value.
*/
static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit)
{
u32 r;
r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
*idlest_reg = (__force void __iomem *)r;
/* USBHOST_IDLE has same shift */
*idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
}
/**
* omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
* @clk: struct clk * being enabled
* @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
* @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
*
* The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
* shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
void __iomem **idlest_reg,
u8 *idlest_bit)
{
u32 r;
r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
*idlest_reg = (__force void __iomem *)r;
*idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
}
/**
* omap3_dpll_recalc - recalculate DPLL rate
* @clk: DPLL struct clk
@ -725,7 +829,9 @@ static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
u32 unlock_dll = 0;
u32 c;
unsigned long validrate, sdrcrate, mpurate;
struct omap_sdrc_params *sp;
struct omap_sdrc_params *sdrc_cs0;
struct omap_sdrc_params *sdrc_cs1;
int ret;
if (!clk || !rate)
return -EINVAL;
@ -743,8 +849,8 @@ static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
else
sdrcrate >>= ((clk->rate / rate) >> 1);
sp = omap2_sdrc_get_params(sdrcrate);
if (!sp)
ret = omap2_sdrc_get_params(sdrcrate, &sdrc_cs0, &sdrc_cs1);
if (ret)
return -EINVAL;
if (sdrcrate < MIN_SDRC_DLL_LOCK_FREQ) {
@ -765,12 +871,29 @@ static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
pr_debug("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate,
validrate);
pr_debug("clock: SDRC timing params used: %08x %08x %08x\n",
sp->rfr_ctrl, sp->actim_ctrla, sp->actim_ctrlb);
pr_debug("clock: SDRC CS0 timing params used:"
" RFR %08x CTRLA %08x CTRLB %08x MR %08x\n",
sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
sdrc_cs0->actim_ctrlb, sdrc_cs0->mr);
if (sdrc_cs1)
pr_debug("clock: SDRC CS1 timing params used: "
" RFR %08x CTRLA %08x CTRLB %08x MR %08x\n",
sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla,
sdrc_cs1->actim_ctrlb, sdrc_cs1->mr);
omap3_configure_core_dpll(sp->rfr_ctrl, sp->actim_ctrla,
sp->actim_ctrlb, new_div, unlock_dll, c,
sp->mr, rate > clk->rate);
if (sdrc_cs1)
omap3_configure_core_dpll(
new_div, unlock_dll, c, rate > clk->rate,
sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla,
sdrc_cs1->actim_ctrlb, sdrc_cs1->mr);
else
omap3_configure_core_dpll(
new_div, unlock_dll, c, rate > clk->rate,
sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
0, 0, 0, 0);
return 0;
}

View File

@ -1568,7 +1568,7 @@ static const struct clksel ssi_ssr_clksel[] = {
{ .parent = NULL }
};
static struct clk ssi_ssr_fck = {
static struct clk ssi_ssr_fck_3430es1 = {
.name = "ssi_ssr_fck",
.ops = &clkops_omap2_dflt,
.init = &omap2_init_clksel_parent,
@ -1581,10 +1581,31 @@ static struct clk ssi_ssr_fck = {
.recalc = &omap2_clksel_recalc,
};
static struct clk ssi_sst_fck = {
static struct clk ssi_ssr_fck_3430es2 = {
.name = "ssi_ssr_fck",
.ops = &clkops_omap3430es2_ssi_wait,
.init = &omap2_init_clksel_parent,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP3430_EN_SSI_SHIFT,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
.clksel_mask = OMAP3430_CLKSEL_SSI_MASK,
.clksel = ssi_ssr_clksel,
.clkdm_name = "core_l4_clkdm",
.recalc = &omap2_clksel_recalc,
};
static struct clk ssi_sst_fck_3430es1 = {
.name = "ssi_sst_fck",
.ops = &clkops_null,
.parent = &ssi_ssr_fck,
.parent = &ssi_ssr_fck_3430es1,
.fixed_div = 2,
.recalc = &omap2_fixed_divisor_recalc,
};
static struct clk ssi_sst_fck_3430es2 = {
.name = "ssi_sst_fck",
.ops = &clkops_null,
.parent = &ssi_ssr_fck_3430es2,
.fixed_div = 2,
.recalc = &omap2_fixed_divisor_recalc,
};
@ -1606,9 +1627,19 @@ static struct clk core_l3_ick = {
.recalc = &followparent_recalc,
};
static struct clk hsotgusb_ick = {
static struct clk hsotgusb_ick_3430es1 = {
.name = "hsotgusb_ick",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2_dflt,
.parent = &core_l3_ick,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
.clkdm_name = "core_l3_clkdm",
.recalc = &followparent_recalc,
};
static struct clk hsotgusb_ick_3430es2 = {
.name = "hsotgusb_ick",
.ops = &clkops_omap3430es2_hsotgusb_wait,
.parent = &core_l3_ick,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
@ -1947,7 +1978,7 @@ static struct clk ssi_l4_ick = {
.recalc = &followparent_recalc,
};
static struct clk ssi_ick = {
static struct clk ssi_ick_3430es1 = {
.name = "ssi_ick",
.ops = &clkops_omap2_dflt,
.parent = &ssi_l4_ick,
@ -1957,6 +1988,16 @@ static struct clk ssi_ick = {
.recalc = &followparent_recalc,
};
static struct clk ssi_ick_3430es2 = {
.name = "ssi_ick",
.ops = &clkops_omap3430es2_ssi_wait,
.parent = &ssi_l4_ick,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP3430_EN_SSI_SHIFT,
.clkdm_name = "core_l4_clkdm",
.recalc = &followparent_recalc,
};
/* REVISIT: Technically the TRM claims that this is CORE_CLK based,
* but l4_ick makes more sense to me */
@ -2024,7 +2065,7 @@ static struct clk des1_ick = {
};
/* DSS */
static struct clk dss1_alwon_fck = {
static struct clk dss1_alwon_fck_3430es1 = {
.name = "dss1_alwon_fck",
.ops = &clkops_omap2_dflt,
.parent = &dpll4_m4x2_ck,
@ -2034,6 +2075,16 @@ static struct clk dss1_alwon_fck = {
.recalc = &followparent_recalc,
};
static struct clk dss1_alwon_fck_3430es2 = {
.name = "dss1_alwon_fck",
.ops = &clkops_omap3430es2_dss_usbhost_wait,
.parent = &dpll4_m4x2_ck,
.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
.enable_bit = OMAP3430_EN_DSS1_SHIFT,
.clkdm_name = "dss_clkdm",
.recalc = &followparent_recalc,
};
static struct clk dss_tv_fck = {
.name = "dss_tv_fck",
.ops = &clkops_omap2_dflt,
@ -2067,7 +2118,7 @@ static struct clk dss2_alwon_fck = {
.recalc = &followparent_recalc,
};
static struct clk dss_ick = {
static struct clk dss_ick_3430es1 = {
/* Handles both L3 and L4 clocks */
.name = "dss_ick",
.ops = &clkops_omap2_dflt,
@ -2079,6 +2130,18 @@ static struct clk dss_ick = {
.recalc = &followparent_recalc,
};
static struct clk dss_ick_3430es2 = {
/* Handles both L3 and L4 clocks */
.name = "dss_ick",
.ops = &clkops_omap3430es2_dss_usbhost_wait,
.parent = &l4_ick,
.init = &omap2_init_clk_clkdm,
.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
.enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
.clkdm_name = "dss_clkdm",
.recalc = &followparent_recalc,
};
/* CAM */
static struct clk cam_mclk = {
@ -2118,7 +2181,7 @@ static struct clk csi2_96m_fck = {
static struct clk usbhost_120m_fck = {
.name = "usbhost_120m_fck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2_dflt,
.parent = &dpll5_m2_ck,
.init = &omap2_init_clk_clkdm,
.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
@ -2129,7 +2192,7 @@ static struct clk usbhost_120m_fck = {
static struct clk usbhost_48m_fck = {
.name = "usbhost_48m_fck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap3430es2_dss_usbhost_wait,
.parent = &omap_48m_fck,
.init = &omap2_init_clk_clkdm,
.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
@ -2141,7 +2204,7 @@ static struct clk usbhost_48m_fck = {
static struct clk usbhost_ick = {
/* Handles both L3 and L4 clocks */
.name = "usbhost_ick",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap3430es2_dss_usbhost_wait,
.parent = &l4_ick,
.init = &omap2_init_clk_clkdm,
.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),

View File

@ -29,9 +29,9 @@
* These registers appear once per CM module.
*/
#define OMAP3430_CM_REVISION OMAP_CM_REGADDR(OCP_MOD, 0x0000)
#define OMAP3430_CM_SYSCONFIG OMAP_CM_REGADDR(OCP_MOD, 0x0010)
#define OMAP3430_CM_POLCTRL OMAP_CM_REGADDR(OCP_MOD, 0x009c)
#define OMAP3430_CM_REVISION OMAP34XX_CM_REGADDR(OCP_MOD, 0x0000)
#define OMAP3430_CM_SYSCONFIG OMAP34XX_CM_REGADDR(OCP_MOD, 0x0010)
#define OMAP3430_CM_POLCTRL OMAP34XX_CM_REGADDR(OCP_MOD, 0x009c)
#define OMAP3_CM_CLKOUT_CTRL_OFFSET 0x0070
#define OMAP3430_CM_CLKOUT_CTRL OMAP_CM_REGADDR(OMAP3430_CCR_MOD, 0x0070)

View File

@ -276,14 +276,15 @@ static int __init _omap2_init_reprogram_sdrc(void)
return v;
}
void __init omap2_init_common_hw(struct omap_sdrc_params *sp)
void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1)
{
omap2_mux_init();
#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */
pwrdm_init(powerdomains_omap);
clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
omap2_clk_init();
omap2_sdrc_init(sp);
omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
_omap2_init_reprogram_sdrc();
#endif
gpmc_init();

View File

@ -119,6 +119,7 @@ static int twl_mmc_late_init(struct device *dev)
if (i != 0)
break;
ret = PTR_ERR(reg);
hsmmc[i].vcc = NULL;
goto err;
}
hsmmc[i].vcc = reg;
@ -165,8 +166,13 @@ static int twl_mmc_late_init(struct device *dev)
static void twl_mmc_cleanup(struct device *dev)
{
struct omap_mmc_platform_data *mmc = dev->platform_data;
int i;
gpio_free(mmc->slots[0].switch_pin);
for(i = 0; i < ARRAY_SIZE(hsmmc); i++) {
regulator_put(hsmmc[i].vcc);
regulator_put(hsmmc[i].vcc_aux);
}
}
#ifdef CONFIG_PM

View File

@ -486,6 +486,12 @@ MUX_CFG_34XX("H19_34XX_GPIO164_OUT", 0x19c,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
MUX_CFG_34XX("J25_34XX_GPIO170", 0x1c6,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
/* OMAP3 SDRC CKE signals to SDR/DDR ram chips */
MUX_CFG_34XX("H16_34XX_SDRC_CKE0", 0x262,
OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT)
MUX_CFG_34XX("H17_34XX_SDRC_CKE1", 0x264,
OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT)
};
#define OMAP34XX_PINS_SZ ARRAY_SIZE(omap34xx_pins)

View File

@ -11,9 +11,6 @@
#ifndef __ARCH_ARM_MACH_OMAP2_PM_H
#define __ARCH_ARM_MACH_OMAP2_PM_H
extern int omap2_pm_init(void);
extern int omap3_pm_init(void);
#ifdef CONFIG_PM_DEBUG
extern void omap2_pm_dump(int mode, int resume, unsigned int us);
extern int omap2_pm_debug;

View File

@ -470,7 +470,7 @@ static void __init prcm_setup_regs(void)
WKUP_MOD, PM_WKEN);
}
int __init omap2_pm_init(void)
static int __init omap2_pm_init(void)
{
u32 l;

View File

@ -39,7 +39,9 @@
struct power_state {
struct powerdomain *pwrdm;
u32 next_state;
#ifdef CONFIG_SUSPEND
u32 saved_state;
#endif
struct list_head node;
};
@ -293,6 +295,9 @@ static void omap3_pm_idle(void)
local_irq_enable();
}
#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state;
static int omap3_pm_prepare(void)
{
disable_hlt();
@ -321,7 +326,6 @@ static int omap3_pm_suspend(void)
restore:
/* Restore next_pwrsts */
list_for_each_entry(pwrst, &pwrst_list, node) {
set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
if (state > pwrst->next_state) {
printk(KERN_INFO "Powerdomain (%s) didn't enter "
@ -329,6 +333,7 @@ static int omap3_pm_suspend(void)
pwrst->pwrdm->name, pwrst->next_state);
ret = -1;
}
set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
}
if (ret)
printk(KERN_ERR "Could not enter target state in pm_suspend\n");
@ -339,11 +344,11 @@ static int omap3_pm_suspend(void)
return ret;
}
static int omap3_pm_enter(suspend_state_t state)
static int omap3_pm_enter(suspend_state_t unused)
{
int ret = 0;
switch (state) {
switch (suspend_state) {
case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
ret = omap3_pm_suspend();
@ -360,12 +365,30 @@ static void omap3_pm_finish(void)
enable_hlt();
}
/* Hooks to enable / disable UART interrupts during suspend */
static int omap3_pm_begin(suspend_state_t state)
{
suspend_state = state;
omap_uart_enable_irqs(0);
return 0;
}
static void omap3_pm_end(void)
{
suspend_state = PM_SUSPEND_ON;
omap_uart_enable_irqs(1);
return;
}
static struct platform_suspend_ops omap_pm_ops = {
.begin = omap3_pm_begin,
.end = omap3_pm_end,
.prepare = omap3_pm_prepare,
.enter = omap3_pm_enter,
.finish = omap3_pm_finish,
.valid = suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */
/**
@ -613,6 +636,24 @@ static void __init prcm_setup_regs(void)
/* Clear any pending PRCM interrupts */
prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
/* Don't attach IVA interrupts */
prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
/* Clear any pending 'reset' flags */
prm_write_mod_reg(0xffffffff, MPU_MOD, RM_RSTST);
prm_write_mod_reg(0xffffffff, CORE_MOD, RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, RM_RSTST);
/* Clear any pending PRCM interrupts */
prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
omap3_iva_idle();
omap3_d2d_idle();
}
@ -652,7 +693,7 @@ static int __init clkdms_setup(struct clockdomain *clkdm)
return 0;
}
int __init omap3_pm_init(void)
static int __init omap3_pm_init(void)
{
struct power_state *pwrst, *tmp;
int ret;
@ -692,7 +733,9 @@ int __init omap3_pm_init(void)
_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
omap34xx_cpu_suspend_sz);
#ifdef CONFIG_SUSPEND
suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */
pm_idle = omap3_pm_idle;

View File

@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <mach/common.h>
#include <mach/prcm.h>
@ -28,6 +29,8 @@
static void __iomem *prm_base;
static void __iomem *cm_base;
#define MAX_MODULE_ENABLE_WAIT 100000
u32 omap_prcm_get_reset_sources(void)
{
/* XXX This presumably needs modification for 34XX */
@ -120,6 +123,46 @@ u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
}
EXPORT_SYMBOL(cm_rmw_mod_reg_bits);
/**
* omap2_cm_wait_idlest - wait for IDLEST bit to indicate module readiness
* @reg: physical address of module IDLEST register
* @mask: value to mask against to determine if the module is active
* @name: name of the clock (for printk)
*
* Returns 1 if the module indicated readiness in time, or 0 if it
* failed to enable in roughly MAX_MODULE_ENABLE_WAIT microseconds.
*/
int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, const char *name)
{
int i = 0;
int ena = 0;
/*
* 24xx uses 0 to indicate not ready, and 1 to indicate ready.
* 34xx reverses this, just to keep us on our toes
*/
if (cpu_is_omap24xx())
ena = mask;
else if (cpu_is_omap34xx())
ena = 0;
else
BUG();
/* Wait for lock */
while (((__raw_readl(reg) & mask) != ena) &&
(i++ < MAX_MODULE_ENABLE_WAIT))
udelay(1);
if (i < MAX_MODULE_ENABLE_WAIT)
pr_debug("cm: Module associated with clock %s ready after %d "
"loops\n", name, i);
else
pr_err("cm: Module associated with clock %s didn't enable in "
"%d tries\n", name, MAX_MODULE_ENABLE_WAIT);
return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
};
void __init omap2_set_globals_prcm(struct omap_globals *omap2_globals)
{
prm_base = omap2_globals->prm;

View File

@ -32,7 +32,7 @@
#include <mach/sdrc.h>
#include "sdrc.h"
static struct omap_sdrc_params *sdrc_init_params;
static struct omap_sdrc_params *sdrc_init_params_cs0, *sdrc_init_params_cs1;
void __iomem *omap2_sdrc_base;
void __iomem *omap2_sms_base;
@ -45,33 +45,49 @@ void __iomem *omap2_sms_base;
/**
* omap2_sdrc_get_params - return SDRC register values for a given clock rate
* @r: SDRC clock rate (in Hz)
* @sdrc_cs0: chip select 0 ram timings **
* @sdrc_cs1: chip select 1 ram timings **
*
* Return pre-calculated values for the SDRC_ACTIM_CTRLA,
* SDRC_ACTIM_CTRLB, SDRC_RFR_CTRL, and SDRC_MR registers, for a given
* SDRC clock rate 'r'. These parameters control various timing
* delays in the SDRAM controller that are expressed in terms of the
* number of SDRC clock cycles to wait; hence the clock rate
* dependency. Note that sdrc_init_params must be sorted rate
* descending. Also assumes that both chip-selects use the same
* timing parameters. Returns a struct omap_sdrc_params * upon
* success, or NULL upon failure.
* SDRC_ACTIM_CTRLB, SDRC_RFR_CTRL and SDRC_MR registers in sdrc_cs[01]
structs, for a given SDRC clock rate 'r'.
* These parameters control various timing delays in the SDRAM controller
* that are expressed in terms of the number of SDRC clock cycles to
* wait; hence the clock rate dependency.
*
* Supports 2 different timing parameters for both chip selects.
*
* Note 1: the sdrc_init_params_cs[01] must be sorted rate descending.
* Note 2: If sdrc_init_params_cs_1 is not NULL it must be of same size
* as sdrc_init_params_cs_0.
*
* Fills in the struct omap_sdrc_params * for each chip select.
* Returns 0 upon success or -1 upon failure.
*/
struct omap_sdrc_params *omap2_sdrc_get_params(unsigned long r)
int omap2_sdrc_get_params(unsigned long r,
struct omap_sdrc_params **sdrc_cs0,
struct omap_sdrc_params **sdrc_cs1)
{
struct omap_sdrc_params *sp;
struct omap_sdrc_params *sp0, *sp1;
if (!sdrc_init_params)
return NULL;
if (!sdrc_init_params_cs0)
return -1;
sp = sdrc_init_params;
sp0 = sdrc_init_params_cs0;
sp1 = sdrc_init_params_cs1;
while (sp->rate && sp->rate != r)
sp++;
while (sp0->rate && sp0->rate != r) {
sp0++;
if (sdrc_init_params_cs1)
sp1++;
}
if (!sp->rate)
return NULL;
if (!sp0->rate)
return -1;
return sp;
*sdrc_cs0 = sp0;
*sdrc_cs1 = sp1;
return 0;
}
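For reference, a hedged sketch (not part of this commit) of the caller side of the new two-chip-select API: a rate-descending, zero-terminated CS0 table and a lookup for a target rate. Only the struct and field names already visible in this diff are relied on; the table name, numeric timings, and helper are placeholders.

	/* sketch only: placeholder timings for a hypothetical board */
	#include <linux/kernel.h>
	#include <mach/sdrc.h>

	static struct omap_sdrc_params my_board_sdrc_cs0[] = {
		{
			.rate        = 166000000,
			.actim_ctrla = 0x629db4c6,	/* placeholder values */
			.actim_ctrlb = 0x00012214,
			.rfr_ctrl    = 0x0004dc01,
			.mr          = 0x00000032,
		},
		{
			.rate        = 83000000,
			.actim_ctrla = 0x31512283,
			.actim_ctrlb = 0x0001220a,
			.rfr_ctrl    = 0x00025501,
			.mr          = 0x00000032,
		},
		{ .rate = 0 },			/* rate-descending, zero-terminated */
	};

	/* registered once, e.g. omap2_init_common_hw(my_board_sdrc_cs0, NULL);
	 * a later rate change can then fetch the matching row per chip select */
	static void my_board_show_timings(unsigned long sdrc_rate)
	{
		struct omap_sdrc_params *cs0, *cs1;

		if (omap2_sdrc_get_params(sdrc_rate, &cs0, &cs1))
			return;		/* no row for this rate */

		pr_debug("CS0 RFR_CTRL at %lu Hz: %08x\n", sdrc_rate, cs0->rfr_ctrl);
		if (cs1)	/* NULL when no CS1 table was registered */
			pr_debug("CS1 RFR_CTRL at %lu Hz: %08x\n",
				 sdrc_rate, cs1->rfr_ctrl);
	}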
@ -83,13 +99,15 @@ void __init omap2_set_globals_sdrc(struct omap_globals *omap2_globals)
/**
* omap2_sdrc_init - initialize SMS, SDRC devices on boot
* @sp: pointer to a null-terminated list of struct omap_sdrc_params
* @sdrc_cs[01]: pointers to a null-terminated list of struct omap_sdrc_params
* Support for 2 chip selects timings
*
* Turn on smart idle modes for SDRAM scheduler and controller.
* Program a known-good configuration for the SDRC to deal with buggy
* bootloaders.
*/
void __init omap2_sdrc_init(struct omap_sdrc_params *sp)
void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1)
{
u32 l;
@ -103,11 +121,15 @@ void __init omap2_sdrc_init(struct omap_sdrc_params *sp)
l |= (0x2 << 3);
sdrc_write_reg(l, SDRC_SYSCONFIG);
sdrc_init_params = sp;
sdrc_init_params_cs0 = sdrc_cs0;
sdrc_init_params_cs1 = sdrc_cs1;
/* XXX Enable SRFRONIDLEREQ here also? */
/*
* PWDENA should not be set due to 34xx erratum 1.150 - PWDENA
* can cause random memory corruption
*/
l = (1 << SDRC_POWER_EXTCLKDIS_SHIFT) |
(1 << SDRC_POWER_PWDENA_SHIFT) |
(1 << SDRC_POWER_PAGEPOLICY_SHIFT);
sdrc_write_reg(l, SDRC_POWER);
}

View File

@ -54,6 +54,7 @@ struct omap_uart_state {
struct plat_serial8250_port *p;
struct list_head node;
struct platform_device pdev;
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
int context_valid;
@ -68,10 +69,9 @@ struct omap_uart_state {
#endif
};
static struct omap_uart_state omap_uart[OMAP_MAX_NR_PORTS];
static LIST_HEAD(uart_list);
static struct plat_serial8250_port serial_platform_data[] = {
static struct plat_serial8250_port serial_platform_data0[] = {
{
.membase = IO_ADDRESS(OMAP_UART1_BASE),
.mapbase = OMAP_UART1_BASE,
@ -81,6 +81,12 @@ static struct plat_serial8250_port serial_platform_data[] = {
.regshift = 2,
.uartclk = OMAP24XX_BASE_BAUD * 16,
}, {
.flags = 0
}
};
static struct plat_serial8250_port serial_platform_data1[] = {
{
.membase = IO_ADDRESS(OMAP_UART2_BASE),
.mapbase = OMAP_UART2_BASE,
.irq = 73,
@ -89,6 +95,12 @@ static struct plat_serial8250_port serial_platform_data[] = {
.regshift = 2,
.uartclk = OMAP24XX_BASE_BAUD * 16,
}, {
.flags = 0
}
};
static struct plat_serial8250_port serial_platform_data2[] = {
{
.membase = IO_ADDRESS(OMAP_UART3_BASE),
.mapbase = OMAP_UART3_BASE,
.irq = 74,
@ -217,6 +229,40 @@ static inline void omap_uart_disable_clocks(struct omap_uart_state *uart)
clk_disable(uart->fck);
}
static void omap_uart_enable_wakeup(struct omap_uart_state *uart)
{
/* Set wake-enable bit */
if (uart->wk_en && uart->wk_mask) {
u32 v = __raw_readl(uart->wk_en);
v |= uart->wk_mask;
__raw_writel(v, uart->wk_en);
}
/* Ensure IOPAD wake-enables are set */
if (cpu_is_omap34xx() && uart->padconf) {
u16 v = omap_ctrl_readw(uart->padconf);
v |= OMAP3_PADCONF_WAKEUPENABLE0;
omap_ctrl_writew(v, uart->padconf);
}
}
static void omap_uart_disable_wakeup(struct omap_uart_state *uart)
{
/* Clear wake-enable bit */
if (uart->wk_en && uart->wk_mask) {
u32 v = __raw_readl(uart->wk_en);
v &= ~uart->wk_mask;
__raw_writel(v, uart->wk_en);
}
/* Ensure IOPAD wake-enables are cleared */
if (cpu_is_omap34xx() && uart->padconf) {
u16 v = omap_ctrl_readw(uart->padconf);
v &= ~OMAP3_PADCONF_WAKEUPENABLE0;
omap_ctrl_writew(v, uart->padconf);
}
}
static void omap_uart_smart_idle_enable(struct omap_uart_state *uart,
int enable)
{
@ -246,6 +292,11 @@ static void omap_uart_block_sleep(struct omap_uart_state *uart)
static void omap_uart_allow_sleep(struct omap_uart_state *uart)
{
if (device_may_wakeup(&uart->pdev.dev))
omap_uart_enable_wakeup(uart);
else
omap_uart_disable_wakeup(uart);
if (!uart->clocked)
return;
@ -292,7 +343,6 @@ void omap_uart_resume_idle(int num)
/* Check for normal UART wakeup */
if (__raw_readl(uart->wk_st) & uart->wk_mask)
omap_uart_block_sleep(uart);
return;
}
}
@ -346,16 +396,13 @@ static irqreturn_t omap_uart_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
static u32 sleep_timeout = DEFAULT_TIMEOUT;
static void omap_uart_idle_init(struct omap_uart_state *uart)
{
u32 v;
struct plat_serial8250_port *p = uart->p;
int ret;
uart->can_sleep = 0;
uart->timeout = sleep_timeout;
uart->timeout = DEFAULT_TIMEOUT;
setup_timer(&uart->timer, omap_uart_idle_timer,
(unsigned long) uart);
mod_timer(&uart->timer, jiffies + uart->timeout);
@ -413,76 +460,101 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
uart->padconf = 0;
}
/* Set wake-enable bit */
if (uart->wk_en && uart->wk_mask) {
v = __raw_readl(uart->wk_en);
v |= uart->wk_mask;
__raw_writel(v, uart->wk_en);
}
/* Ensure IOPAD wake-enables are set */
if (cpu_is_omap34xx() && uart->padconf) {
u16 v;
v = omap_ctrl_readw(uart->padconf);
v |= OMAP3_PADCONF_WAKEUPENABLE0;
omap_ctrl_writew(v, uart->padconf);
}
p->flags |= UPF_SHARE_IRQ;
ret = request_irq(p->irq, omap_uart_interrupt, IRQF_SHARED,
"serial idle", (void *)uart);
WARN_ON(ret);
}
static ssize_t sleep_timeout_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
void omap_uart_enable_irqs(int enable)
{
return sprintf(buf, "%u\n", sleep_timeout / HZ);
int ret;
struct omap_uart_state *uart;
list_for_each_entry(uart, &uart_list, node) {
if (enable)
ret = request_irq(uart->p->irq, omap_uart_interrupt,
IRQF_SHARED, "serial idle", (void *)uart);
else
free_irq(uart->p->irq, (void *)uart);
}
}
static ssize_t sleep_timeout_store(struct kobject *kobj,
struct kobj_attribute *attr,
static ssize_t sleep_timeout_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
struct omap_uart_state *uart = container_of(pdev,
struct omap_uart_state, pdev);
return sprintf(buf, "%u\n", uart->timeout / HZ);
}
static ssize_t sleep_timeout_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t n)
{
struct omap_uart_state *uart;
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
struct omap_uart_state *uart = container_of(pdev,
struct omap_uart_state, pdev);
unsigned int value;
if (sscanf(buf, "%u", &value) != 1) {
printk(KERN_ERR "sleep_timeout_store: Invalid value\n");
return -EINVAL;
}
sleep_timeout = value * HZ;
list_for_each_entry(uart, &uart_list, node) {
uart->timeout = sleep_timeout;
uart->timeout = value * HZ;
if (uart->timeout)
mod_timer(&uart->timer, jiffies + uart->timeout);
else
/* A zero value means disable timeout feature */
omap_uart_block_sleep(uart);
}
return n;
}
static struct kobj_attribute sleep_timeout_attr =
__ATTR(sleep_timeout, 0644, sleep_timeout_show, sleep_timeout_store);
DEVICE_ATTR(sleep_timeout, 0644, sleep_timeout_show, sleep_timeout_store);
#define DEV_CREATE_FILE(dev, attr) WARN_ON(device_create_file(dev, attr))
#else
static inline void omap_uart_idle_init(struct omap_uart_state *uart) {}
#define DEV_CREATE_FILE(dev, attr)
#endif /* CONFIG_PM */
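For illustration, a small userspace sketch (not part of this commit) of driving the now per-port sleep_timeout attribute. The sysfs path is an assumption derived from the "serial8250" platform devices registered below; only the seconds semantics and the zero-disables behaviour come from the store handler above.

	/* sketch: set a 5 second idle timeout on the first OMAP UART */
	#include <stdio.h>

	int main(void)
	{
		/* assumed path for platform device "serial8250", id 0 */
		FILE *f = fopen("/sys/devices/platform/serial8250.0/sleep_timeout", "w");

		if (!f)
			return 1;
		fprintf(f, "5\n");	/* seconds; writing 0 disables the idle timeout */
		return fclose(f) ? 1 : 0;
	}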
static struct platform_device serial_device = {
static struct omap_uart_state omap_uart[OMAP_MAX_NR_PORTS] = {
{
.pdev = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = serial_platform_data,
.platform_data = serial_platform_data0,
},
},
}, {
.pdev = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM1,
.dev = {
.platform_data = serial_platform_data1,
},
},
}, {
.pdev = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM2,
.dev = {
.platform_data = serial_platform_data2,
},
},
},
};
void __init omap_serial_init(void)
{
int i, err;
int i;
const struct omap_uart_config *info;
char name[16];
@ -496,14 +568,12 @@ void __init omap_serial_init(void)
if (info == NULL)
return;
if (cpu_is_omap44xx()) {
for (i = 0; i < OMAP_MAX_NR_PORTS; i++)
serial_platform_data[i].irq += 32;
}
for (i = 0; i < OMAP_MAX_NR_PORTS; i++) {
struct plat_serial8250_port *p = serial_platform_data + i;
struct omap_uart_state *uart = &omap_uart[i];
struct platform_device *pdev = &uart->pdev;
struct device *dev = &pdev->dev;
struct plat_serial8250_port *p = dev->platform_data;
if (!(info->enabled_uarts & (1 << i))) {
p->membase = NULL;
@ -531,20 +601,21 @@ void __init omap_serial_init(void)
uart->num = i;
p->private_data = uart;
uart->p = p;
list_add(&uart->node, &uart_list);
list_add_tail(&uart->node, &uart_list);
if (cpu_is_omap44xx())
p->irq += 32;
omap_uart_enable_clocks(uart);
omap_uart_reset(uart);
omap_uart_idle_init(uart);
if (WARN_ON(platform_device_register(pdev)))
continue;
if ((cpu_is_omap34xx() && uart->padconf) ||
(uart->wk_en && uart->wk_mask)) {
device_init_wakeup(dev, true);
DEV_CREATE_FILE(dev, &dev_attr_sleep_timeout);
}
}
err = platform_device_register(&serial_device);
#ifdef CONFIG_PM
if (!err)
err = sysfs_create_file(&serial_device.dev.kobj,
&sleep_timeout_attr.attr);
#endif
}

View File

@ -36,7 +36,7 @@
.text
/* r4 parameters */
/* r1 parameters */
#define SDRC_NO_UNLOCK_DLL 0x0
#define SDRC_UNLOCK_DLL 0x1
@ -58,7 +58,6 @@
/* SDRC_POWER bit settings */
#define SRFRONIDLEREQ_MASK 0x40
#define PWDENA_MASK 0x4
/* CM_IDLEST1_CORE bit settings */
#define ST_SDRC_MASK 0x2
@ -71,41 +70,72 @@
/*
* omap3_sram_configure_core_dpll - change DPLL3 M2 divider
* r0 = new SDRC_RFR_CTRL register contents
* r1 = new SDRC_ACTIM_CTRLA register contents
* r2 = new SDRC_ACTIM_CTRLB register contents
* r3 = new M2 divider setting (only 1 and 2 supported right now)
* r4 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for
* SDRC rates < 83MHz
* r5 = number of MPU cycles to wait for SDRC to stabilize after
* reprogramming the SDRC when switching to a slower MPU speed
* r6 = new SDRC_MR_0 register value
* r7 = increasing SDRC rate? (1 = yes, 0 = no)
*
* Params passed in registers:
* r0 = new M2 divider setting (only 1 and 2 supported right now)
* r1 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for
* SDRC rates < 83MHz
* r2 = number of MPU cycles to wait for SDRC to stabilize after
* reprogramming the SDRC when switching to a slower MPU speed
* r3 = increasing SDRC rate? (1 = yes, 0 = no)
*
* Params passed via the stack. The needed params will be copied in SRAM
* before use by the code in SRAM (SDRAM is not accessible during SDRC
* reconfiguration):
* new SDRC_RFR_CTRL_0 register contents
* new SDRC_ACTIM_CTRL_A_0 register contents
* new SDRC_ACTIM_CTRL_B_0 register contents
* new SDRC_MR_0 register value
* new SDRC_RFR_CTRL_1 register contents
* new SDRC_ACTIM_CTRL_A_1 register contents
* new SDRC_ACTIM_CTRL_B_1 register contents
* new SDRC_MR_1 register value
*
* If the param SDRC_RFR_CTRL_1 is 0, the parameters
* are not programmed into the SDRC CS1 registers
*/
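As a worked check of the stack offsets used below (an aside, not part of the source): the entry sequence saves r1-r12 and lr with stmfd, that is 13 words or 52 bytes, so under the standard AAPCS convention the first stack-passed parameter, SDRC_RFR_CTRL_0, sits at [sp, #52], and the remaining seven follow at 4-byte steps up to SDRC_MR_1 at [sp, #80], matching the ldr offsets in the code.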
ENTRY(omap3_sram_configure_core_dpll)
stmfd sp!, {r1-r12, lr} @ store regs to stack
ldr r4, [sp, #52] @ pull extra args off the stack
ldr r5, [sp, #56] @ load extra args from the stack
ldr r6, [sp, #60] @ load extra args from the stack
ldr r7, [sp, #64] @ load extra args from the stack
@ pull the extra args off the stack
@ and store them in SRAM
ldr r4, [sp, #52]
str r4, omap_sdrc_rfr_ctrl_0_val
ldr r4, [sp, #56]
str r4, omap_sdrc_actim_ctrl_a_0_val
ldr r4, [sp, #60]
str r4, omap_sdrc_actim_ctrl_b_0_val
ldr r4, [sp, #64]
str r4, omap_sdrc_mr_0_val
ldr r4, [sp, #68]
str r4, omap_sdrc_rfr_ctrl_1_val
cmp r4, #0 @ if SDRC_RFR_CTRL_1 is 0,
beq skip_cs1_params @ do not use cs1 params
ldr r4, [sp, #72]
str r4, omap_sdrc_actim_ctrl_a_1_val
ldr r4, [sp, #76]
str r4, omap_sdrc_actim_ctrl_b_1_val
ldr r4, [sp, #80]
str r4, omap_sdrc_mr_1_val
skip_cs1_params:
dsb @ flush buffered writes to interconnect
cmp r7, #1 @ if increasing SDRC clk rate,
cmp r3, #1 @ if increasing SDRC clk rate,
bleq configure_sdrc @ program the SDRC regs early (for RFR)
cmp r4, #SDRC_UNLOCK_DLL @ set the intended DLL state
cmp r1, #SDRC_UNLOCK_DLL @ set the intended DLL state
bleq unlock_dll
blne lock_dll
bl sdram_in_selfrefresh @ put SDRAM in self refresh, idle SDRC
bl configure_core_dpll @ change the DPLL3 M2 divider
mov r12, r2
bl wait_clk_stable @ wait for SDRC to stabilize
bl enable_sdrc @ take SDRC out of idle
cmp r4, #SDRC_UNLOCK_DLL @ wait for DLL status to change
cmp r1, #SDRC_UNLOCK_DLL @ wait for DLL status to change
bleq wait_dll_unlock
blne wait_dll_lock
cmp r7, #1 @ if increasing SDRC clk rate,
cmp r3, #1 @ if increasing SDRC clk rate,
beq return_to_sdram @ return to SDRAM code, otherwise,
bl configure_sdrc @ reprogram SDRC regs now
mov r12, r5
bl wait_clk_stable @ wait for SDRC to stabilize
return_to_sdram:
isb @ prevent speculative exec past here
mov r0, #0 @ return value
@ -113,7 +143,7 @@ return_to_sdram:
unlock_dll:
ldr r11, omap3_sdrc_dlla_ctrl
ldr r12, [r11]
and r12, r12, #FIXEDDELAY_MASK
bic r12, r12, #FIXEDDELAY_MASK
orr r12, r12, #FIXEDDELAY_DEFAULT
orr r12, r12, #DLLIDLE_MASK
str r12, [r11] @ (no OCP barrier needed)
@ -129,7 +159,6 @@ sdram_in_selfrefresh:
ldr r12, [r11] @ read the contents of SDRC_POWER
mov r9, r12 @ keep a copy of SDRC_POWER bits
orr r12, r12, #SRFRONIDLEREQ_MASK @ enable self refresh on idle
bic r12, r12, #PWDENA_MASK @ clear PWDENA
str r12, [r11] @ write back to SDRC_POWER register
ldr r12, [r11] @ posted-write barrier for SDRC
idle_sdrc:
@ -149,7 +178,7 @@ configure_core_dpll:
ldr r12, [r11]
ldr r10, core_m2_mask_val @ modify m2 for core dpll
and r12, r12, r10
orr r12, r12, r3, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT
orr r12, r12, r0, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT
str r12, [r11]
ldr r12, [r11] @ posted-write barrier for CM
bx lr
@ -187,15 +216,34 @@ wait_dll_unlock:
bne wait_dll_unlock
bx lr
configure_sdrc:
ldr r11, omap3_sdrc_rfr_ctrl
str r0, [r11]
ldr r11, omap3_sdrc_actim_ctrla
str r1, [r11]
ldr r11, omap3_sdrc_actim_ctrlb
str r2, [r11]
ldr r12, omap_sdrc_rfr_ctrl_0_val @ fetch value from SRAM
ldr r11, omap3_sdrc_rfr_ctrl_0 @ fetch addr from SRAM
str r12, [r11] @ store
ldr r12, omap_sdrc_actim_ctrl_a_0_val
ldr r11, omap3_sdrc_actim_ctrl_a_0
str r12, [r11]
ldr r12, omap_sdrc_actim_ctrl_b_0_val
ldr r11, omap3_sdrc_actim_ctrl_b_0
str r12, [r11]
ldr r12, omap_sdrc_mr_0_val
ldr r11, omap3_sdrc_mr_0
str r6, [r11]
ldr r6, [r11] @ posted-write barrier for SDRC
str r12, [r11]
ldr r12, omap_sdrc_rfr_ctrl_1_val
cmp r12, #0 @ if SDRC_RFR_CTRL_1 is 0,
beq skip_cs1_prog @ do not program cs1 params
ldr r11, omap3_sdrc_rfr_ctrl_1
str r12, [r11]
ldr r12, omap_sdrc_actim_ctrl_a_1_val
ldr r11, omap3_sdrc_actim_ctrl_a_1
str r12, [r11]
ldr r12, omap_sdrc_actim_ctrl_b_1_val
ldr r11, omap3_sdrc_actim_ctrl_b_1
str r12, [r11]
ldr r12, omap_sdrc_mr_1_val
ldr r11, omap3_sdrc_mr_1
str r12, [r11]
skip_cs1_prog:
ldr r12, [r11] @ posted-write barrier for SDRC
bx lr
omap3_sdrc_power:
@ -206,14 +254,40 @@ omap3_cm_idlest1_core:
.word OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST)
omap3_cm_iclken1_core:
.word OMAP34XX_CM_REGADDR(CORE_MOD, CM_ICLKEN1)
omap3_sdrc_rfr_ctrl:
omap3_sdrc_rfr_ctrl_0:
.word OMAP34XX_SDRC_REGADDR(SDRC_RFR_CTRL_0)
omap3_sdrc_actim_ctrla:
omap3_sdrc_rfr_ctrl_1:
.word OMAP34XX_SDRC_REGADDR(SDRC_RFR_CTRL_1)
omap3_sdrc_actim_ctrl_a_0:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_0)
omap3_sdrc_actim_ctrlb:
omap3_sdrc_actim_ctrl_a_1:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_1)
omap3_sdrc_actim_ctrl_b_0:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_0)
omap3_sdrc_actim_ctrl_b_1:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_1)
omap3_sdrc_mr_0:
.word OMAP34XX_SDRC_REGADDR(SDRC_MR_0)
omap3_sdrc_mr_1:
.word OMAP34XX_SDRC_REGADDR(SDRC_MR_1)
omap_sdrc_rfr_ctrl_0_val:
.word 0xDEADBEEF
omap_sdrc_rfr_ctrl_1_val:
.word 0xDEADBEEF
omap_sdrc_actim_ctrl_a_0_val:
.word 0xDEADBEEF
omap_sdrc_actim_ctrl_a_1_val:
.word 0xDEADBEEF
omap_sdrc_actim_ctrl_b_0_val:
.word 0xDEADBEEF
omap_sdrc_actim_ctrl_b_1_val:
.word 0xDEADBEEF
omap_sdrc_mr_0_val:
.word 0xDEADBEEF
omap_sdrc_mr_1_val:
.word 0xDEADBEEF
omap3_sdrc_dlla_status:
.word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
omap3_sdrc_dlla_ctrl:
@ -223,3 +297,4 @@ core_m2_mask_val:
ENTRY(omap3_sram_configure_core_dpll_sz)
.word . - omap3_sram_configure_core_dpll


@ -510,7 +510,7 @@ static struct db_chip db_chips[] __initdata = {
}
};
static void u300_init_check_chip(void)
static void __init u300_init_check_chip(void)
{
u16 val;


@ -120,6 +120,32 @@ void show_mem(void)
printk("%d pages swap cached\n", cached);
}
static void __init find_node_limits(int node, struct meminfo *mi,
unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
int i;
*min = -1UL;
*max_low = *max_high = 0;
for_each_nodebank(i, mi, node) {
struct membank *bank = &mi->bank[i];
unsigned long start, end;
start = bank_pfn_start(bank);
end = bank_pfn_end(bank);
if (*min > start)
*min = start;
if (*max_high < end)
*max_high = end;
if (bank->highmem)
continue;
if (*max_low < end)
*max_low = end;
}
}
/*
* FIXME: We really want to avoid allocating the bootmap bitmap
* over the top of the initrd. Hopefully, this is located towards
@ -210,40 +236,24 @@ static inline void map_memory_bank(struct membank *bank)
#endif
}
static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
static void __init bootmem_init_node(int node, struct meminfo *mi,
unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long start_pfn, end_pfn, boot_pfn;
unsigned long boot_pfn;
unsigned int boot_pages;
pg_data_t *pgdat;
int i;
start_pfn = -1UL;
end_pfn = 0;
/*
* Calculate the pfn range, and map the memory banks for this node.
* Map the memory banks for this node.
*/
for_each_nodebank(i, mi, node) {
struct membank *bank = &mi->bank[i];
unsigned long start, end;
start = bank_pfn_start(bank);
end = bank_pfn_end(bank);
if (start_pfn > start)
start_pfn = start;
if (end_pfn < end)
end_pfn = end;
if (!bank->highmem)
map_memory_bank(bank);
}
/*
* If there is no memory in this node, ignore it.
*/
if (end_pfn == 0)
return end_pfn;
/*
* Allocate the bootmem bitmap page.
*/
@ -260,6 +270,7 @@ static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
for_each_nodebank(i, mi, node) {
struct membank *bank = &mi->bank[i];
if (!bank->highmem)
free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
}
@ -269,8 +280,6 @@ static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
*/
reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
return end_pfn;
}
static void __init bootmem_reserve_initrd(int node)
@ -297,33 +306,39 @@ static void __init bootmem_reserve_initrd(int node)
static void __init bootmem_free_node(int node, struct meminfo *mi)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
unsigned long start_pfn, end_pfn;
pg_data_t *pgdat = NODE_DATA(node);
unsigned long min, max_low, max_high;
int i;
start_pfn = pgdat->bdata->node_min_pfn;
end_pfn = pgdat->bdata->node_low_pfn;
find_node_limits(node, mi, &min, &max_low, &max_high);
/*
* initialise the zones within this node.
*/
memset(zone_size, 0, sizeof(zone_size));
memset(zhole_size, 0, sizeof(zhole_size));
/*
* The size of this node has already been determined. If we need
* to do anything fancy with the allocation of this memory to the
* zones, now is the time to do it.
*/
zone_size[0] = end_pfn - start_pfn;
zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif
/*
* For each bank in this node, calculate the size of the holes.
* holes = node_size - sum(bank_sizes_in_node)
*/
zhole_size[0] = zone_size[0];
for_each_nodebank(i, mi, node)
zhole_size[0] -= bank_pfn_size(&mi->bank[i]);
memcpy(zhole_size, zone_size, sizeof(zhole_size));
for_each_nodebank(i, mi, node) {
int idx = 0;
#ifdef CONFIG_HIGHMEM
if (mi->bank[i].highmem)
idx = ZONE_HIGHMEM;
#endif
zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
}
/*
* Adjust the sizes according to any special requirements for
@ -331,13 +346,13 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
*/
arch_adjust_zones(node, zone_size, zhole_size);
free_area_init_node(node, zone_size, start_pfn, zhole_size);
free_area_init_node(node, zone_size, min, zhole_size);
}
void __init bootmem_init(void)
{
struct meminfo *mi = &meminfo;
unsigned long memend_pfn = 0;
unsigned long min, max_low, max_high;
int node, initrd_node;
/*
@ -345,11 +360,29 @@ void __init bootmem_init(void)
*/
initrd_node = check_initrd(mi);
max_low = max_high = 0;
/*
* Run through each node initialising the bootmem allocator.
*/
for_each_node(node) {
unsigned long end_pfn = bootmem_init_node(node, mi);
unsigned long node_low, node_high;
find_node_limits(node, mi, &min, &node_low, &node_high);
if (node_low > max_low)
max_low = node_low;
if (node_high > max_high)
max_high = node_high;
/*
* If there is no memory in this node, ignore it.
* (We can't have nodes which have no lowmem)
*/
if (node_low == 0)
continue;
bootmem_init_node(node, mi, min, node_low);
/*
* Reserve any special node zero regions.
@ -362,12 +395,6 @@ void __init bootmem_init(void)
*/
if (node == initrd_node)
bootmem_reserve_initrd(node);
/*
* Remember the highest memory PFN.
*/
if (end_pfn > memend_pfn)
memend_pfn = end_pfn;
}
/*
@ -383,7 +410,7 @@ void __init bootmem_init(void)
for_each_node(node)
bootmem_free_node(node, mi);
high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;
high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
/*
* This doesn't seem to be used by the Linux memory manager any
@ -393,7 +420,8 @@ void __init bootmem_init(void)
* Note: max_low_pfn and max_pfn reflect the number of _pages_ in
* the system, not the maximum PFN.
*/
max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
max_low_pfn = max_low - PHYS_PFN_OFFSET;
max_pfn = max_high - PHYS_PFN_OFFSET;
}
static inline int free_area(unsigned long pfn, unsigned long end, char *s)


@ -687,13 +687,19 @@ __early_param("vmalloc=", early_vmalloc);
static void __init sanity_check_meminfo(void)
{
int i, j;
int i, j, highmem = 0;
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
#ifdef CONFIG_HIGHMEM
if (__va(bank->start) > VMALLOC_MIN ||
__va(bank->start) < (void *)PAGE_OFFSET)
highmem = 1;
bank->highmem = highmem;
/*
* Split those memory banks which are partially overlapping
* the vmalloc area greatly simplifying things later.
@ -714,6 +720,7 @@ static void __init sanity_check_meminfo(void)
i++;
bank[1].size -= VMALLOC_MIN - __va(bank->start);
bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
bank[1].highmem = highmem = 1;
j++;
}
bank->size = VMALLOC_MIN - __va(bank->start);


@ -78,10 +78,10 @@ static int omap_target(struct cpufreq_policy *policy,
/* Ensure desired rate is within allowed range. Some governors
* (ondemand) will just pass target_freq=0 to get the minimum. */
if (target_freq < policy->cpuinfo.min_freq)
target_freq = policy->cpuinfo.min_freq;
if (target_freq > policy->cpuinfo.max_freq)
target_freq = policy->cpuinfo.max_freq;
if (target_freq < policy->min)
target_freq = policy->min;
if (target_freq > policy->max)
target_freq = policy->max;
freqs.old = omap_getspeed(0);
freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;


@ -946,7 +946,9 @@ void omap_start_dma(int lch)
cur_lch = next_lch;
} while (next_lch != -1);
} else if (cpu_class_is_omap2()) {
} else if (cpu_is_omap242x() ||
(cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) {
/* Errata: Need to write lch even if not using chaining */
dma_write(lch, CLNK_CTRL(lch));
}


@ -476,14 +476,12 @@ static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
__raw_writel(l, reg);
}
static int __omap_get_gpio_datain(int gpio)
static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
{
struct gpio_bank *bank;
void __iomem *reg;
if (check_gpio(gpio) < 0)
return -EINVAL;
bank = get_gpio_bank(gpio);
reg = bank->base;
switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP1
@ -524,6 +522,53 @@ static int __omap_get_gpio_datain(int gpio)
& (1 << get_gpio_index(gpio))) != 0;
}
static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
{
void __iomem *reg;
if (check_gpio(gpio) < 0)
return -EINVAL;
reg = bank->base;
switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP1
case METHOD_MPUIO:
reg += OMAP_MPUIO_OUTPUT;
break;
#endif
#ifdef CONFIG_ARCH_OMAP15XX
case METHOD_GPIO_1510:
reg += OMAP1510_GPIO_DATA_OUTPUT;
break;
#endif
#ifdef CONFIG_ARCH_OMAP16XX
case METHOD_GPIO_1610:
reg += OMAP1610_GPIO_DATAOUT;
break;
#endif
#ifdef CONFIG_ARCH_OMAP730
case METHOD_GPIO_730:
reg += OMAP730_GPIO_DATA_OUTPUT;
break;
#endif
#ifdef CONFIG_ARCH_OMAP850
case METHOD_GPIO_850:
reg += OMAP850_GPIO_DATA_OUTPUT;
break;
#endif
#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
defined(CONFIG_ARCH_OMAP4)
case METHOD_GPIO_24XX:
reg += OMAP24XX_GPIO_DATAOUT;
break;
#endif
default:
return -EINVAL;
}
return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0;
}
#define MOD_REG_BIT(reg, bit_mask, set) \
do { \
int l = __raw_readl(base + reg); \
@ -1189,6 +1234,7 @@ static void gpio_mask_irq(unsigned int irq)
struct gpio_bank *bank = get_irq_chip_data(irq);
_set_gpio_irqenable(bank, gpio, 0);
_set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
}
static void gpio_unmask_irq(unsigned int irq)
@ -1196,6 +1242,11 @@ static void gpio_unmask_irq(unsigned int irq)
unsigned int gpio = irq - IH_GPIO_BASE;
struct gpio_bank *bank = get_irq_chip_data(irq);
unsigned int irq_mask = 1 << get_gpio_index(gpio);
struct irq_desc *desc = irq_to_desc(irq);
u32 trigger = desc->status & IRQ_TYPE_SENSE_MASK;
if (trigger)
_set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
/* For level-triggered GPIOs, the clearing must be done after
* the HW source is cleared, thus after the handler has run */
@ -1356,9 +1407,49 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
return 0;
}
static int gpio_is_input(struct gpio_bank *bank, int mask)
{
void __iomem *reg = bank->base;
switch (bank->method) {
case METHOD_MPUIO:
reg += OMAP_MPUIO_IO_CNTL;
break;
case METHOD_GPIO_1510:
reg += OMAP1510_GPIO_DIR_CONTROL;
break;
case METHOD_GPIO_1610:
reg += OMAP1610_GPIO_DIRECTION;
break;
case METHOD_GPIO_730:
reg += OMAP730_GPIO_DIR_CONTROL;
break;
case METHOD_GPIO_850:
reg += OMAP850_GPIO_DIR_CONTROL;
break;
case METHOD_GPIO_24XX:
reg += OMAP24XX_GPIO_OE;
break;
}
return __raw_readl(reg) & mask;
}
static int gpio_get(struct gpio_chip *chip, unsigned offset)
{
return __omap_get_gpio_datain(chip->base + offset);
struct gpio_bank *bank;
void __iomem *reg;
int gpio;
u32 mask;
gpio = chip->base + offset;
bank = get_gpio_bank(gpio);
reg = bank->base;
mask = 1 << get_gpio_index(gpio);
if (gpio_is_input(bank, mask))
return _get_gpio_datain(bank, gpio);
else
return _get_gpio_dataout(bank, gpio);
}
static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
@ -1892,34 +1983,6 @@ arch_initcall(omap_gpio_sysinit);
#include <linux/debugfs.h>
#include <linux/seq_file.h>
static int gpio_is_input(struct gpio_bank *bank, int mask)
{
void __iomem *reg = bank->base;
switch (bank->method) {
case METHOD_MPUIO:
reg += OMAP_MPUIO_IO_CNTL;
break;
case METHOD_GPIO_1510:
reg += OMAP1510_GPIO_DIR_CONTROL;
break;
case METHOD_GPIO_1610:
reg += OMAP1610_GPIO_DIRECTION;
break;
case METHOD_GPIO_730:
reg += OMAP730_GPIO_DIR_CONTROL;
break;
case METHOD_GPIO_850:
reg += OMAP850_GPIO_DIR_CONTROL;
break;
case METHOD_GPIO_24XX:
reg += OMAP24XX_GPIO_OE;
break;
}
return __raw_readl(reg) & mask;
}
static int dbg_gpio_show(struct seq_file *s, void *unused)
{
unsigned i, j, gpio;


@ -20,6 +20,8 @@ struct clockdomain;
struct clkops {
int (*enable)(struct clk *);
void (*disable)(struct clk *);
void (*find_idlest)(struct clk *, void __iomem **, u8 *);
void (*find_companion)(struct clk *, void __iomem **, u8 *);
};
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \


@ -378,9 +378,6 @@ IS_OMAP_TYPE(3430, 0x3430)
#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \
cpu_is_omap44xx())
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \
defined(CONFIG_ARCH_OMAP4)
/* Various silicon revisions for omap2 */
#define OMAP242X_CLASS 0x24200024
#define OMAP2420_REV_ES1_0 0x24200024
@ -436,5 +433,3 @@ IS_OMAP_TYPE(3430, 0x3430)
int omap_chip_is(struct omap_chip_id oci);
void omap2_check_revision(void);
#endif /* defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) */


@ -228,7 +228,8 @@ extern void omap1_map_common_io(void);
extern void omap1_init_common_hw(void);
extern void omap2_map_common_io(void);
extern void omap2_init_common_hw(struct omap_sdrc_params *sp);
extern void omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1);
#define __arch_ioremap(p,s,t) omap_ioremap(p,s,t)
#define __arch_iounmap(v) omap_iounmap(v)


@ -853,6 +853,10 @@ enum omap34xx_index {
AE5_34XX_GPIO143,
H19_34XX_GPIO164_OUT,
J25_34XX_GPIO170,
/* OMAP3 SDRC CKE signals to SDR/DDR ram chips */
H16_34XX_SDRC_CKE0,
H17_34XX_SDRC_CKE1,
};
struct omap_mux_cfg {


@ -25,6 +25,7 @@
u32 omap_prcm_get_reset_sources(void);
void omap_prcm_arch_reset(char mode);
int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, const char *name);
#endif


@ -30,6 +30,10 @@
#define SDRC_ACTIM_CTRL_A_0 0x09c
#define SDRC_ACTIM_CTRL_B_0 0x0a0
#define SDRC_RFR_CTRL_0 0x0a4
#define SDRC_MR_1 0x0B4
#define SDRC_ACTIM_CTRL_A_1 0x0C4
#define SDRC_ACTIM_CTRL_B_1 0x0C8
#define SDRC_RFR_CTRL_1 0x0D4
/*
* These values represent the number of memory clock cycles between
@ -102,8 +106,11 @@ struct omap_sdrc_params {
u32 mr;
};
void __init omap2_sdrc_init(struct omap_sdrc_params *sp);
struct omap_sdrc_params *omap2_sdrc_get_params(unsigned long r);
void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1);
int omap2_sdrc_get_params(unsigned long r,
struct omap_sdrc_params **sdrc_cs0,
struct omap_sdrc_params **sdrc_cs1);
#ifdef CONFIG_ARCH_OMAP2


@ -59,6 +59,7 @@ extern void omap_uart_check_wakeup(void);
extern void omap_uart_prepare_suspend(void);
extern void omap_uart_prepare_idle(int num);
extern void omap_uart_resume_idle(int num);
extern void omap_uart_enable_irqs(int enable);
#endif
#endif


@ -21,11 +21,12 @@ extern void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val,
u32 mem_type);
extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
extern u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb, u32 m2,
u32 unlock_dll, u32 f, u32 sdrc_mr,
u32 inc);
extern u32 omap3_configure_core_dpll(
u32 m2, u32 unlock_dll, u32 f, u32 inc,
u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1);
/* Do not use these */
extern void omap1_sram_reprogram_clock(u32 ckctl, u32 dpllctl);
@ -59,12 +60,12 @@ extern void omap243x_sram_reprogram_sdrc(u32 perf_level, u32 dll_val,
u32 mem_type);
extern unsigned long omap243x_sram_reprogram_sdrc_sz;
extern u32 omap3_sram_configure_core_dpll(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb, u32 m2,
u32 unlock_dll, u32 f, u32 sdrc_mr,
u32 inc);
extern u32 omap3_sram_configure_core_dpll(
u32 m2, u32 unlock_dll, u32 f, u32 inc,
u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1);
extern unsigned long omap3_sram_configure_core_dpll_sz;
#endif


@ -44,9 +44,9 @@
#define OMAP2_SRAM_VA 0xe3000000
#define OMAP2_SRAM_PUB_VA (OMAP2_SRAM_VA + 0x800)
#define OMAP3_SRAM_PA 0x40200000
#define OMAP3_SRAM_VA 0xd7000000
#define OMAP3_SRAM_VA 0xe3000000
#define OMAP3_SRAM_PUB_PA 0x40208000
#define OMAP3_SRAM_PUB_VA 0xd7008000
#define OMAP3_SRAM_PUB_VA (OMAP3_SRAM_VA + 0x8000)
#define OMAP4_SRAM_PA 0x40200000 /*0x402f0000*/
#define OMAP4_SRAM_VA 0xd7000000 /*0xd70f0000*/
@ -373,20 +373,26 @@ static inline int omap243x_sram_init(void)
#ifdef CONFIG_ARCH_OMAP3
static u32 (*_omap3_sram_configure_core_dpll)(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb,
u32 m2, u32 unlock_dll,
u32 f, u32 sdrc_mr, u32 inc);
u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl, u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb, u32 m2, u32 unlock_dll,
u32 f, u32 sdrc_mr, u32 inc)
static u32 (*_omap3_sram_configure_core_dpll)(
u32 m2, u32 unlock_dll, u32 f, u32 inc,
u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1);
u32 omap3_configure_core_dpll(u32 m2, u32 unlock_dll, u32 f, u32 inc,
u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1)
{
BUG_ON(!_omap3_sram_configure_core_dpll);
return _omap3_sram_configure_core_dpll(sdrc_rfr_ctrl,
sdrc_actim_ctrla,
sdrc_actim_ctrlb, m2,
unlock_dll, f, sdrc_mr, inc);
return _omap3_sram_configure_core_dpll(
m2, unlock_dll, f, inc,
sdrc_rfr_ctrl_0, sdrc_actim_ctrl_a_0,
sdrc_actim_ctrl_b_0, sdrc_mr_0,
sdrc_rfr_ctrl_1, sdrc_actim_ctrl_a_1,
sdrc_actim_ctrl_b_1, sdrc_mr_1);
}
/* REVISIT: Should this be same as omap34xx_sram_init() after off-idle? */


@ -129,7 +129,7 @@ static int s3c24xx_clkout_setparent(struct clk *clk, struct clk *parent)
/* calculate the MISCCR setting for the clock */
if (parent == &clk_xtal)
if (parent == &clk_mpll)
source = S3C2410_MISCCR_CLK0_MPLL;
else if (parent == &clk_upll)
source = S3C2410_MISCCR_CLK0_UPLL;


@ -32,10 +32,12 @@
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#endif /* CONFIG_HUGETLB_PAGE */
#ifndef __ASSEMBLY__


@ -238,7 +238,7 @@ static struct platform_device ceu1_device = {
},
};
/* KEYSC */
/* KEYSC in SoC (Needs SW33-2 set to ON) */
static struct sh_keysc_info keysc_info = {
.mode = SH_KEYSC_MODE_1,
.scan_timing = 10,
@ -255,12 +255,13 @@ static struct sh_keysc_info keysc_info = {
static struct resource keysc_resources[] = {
[0] = {
.start = 0x1a204000,
.end = 0x1a20400f,
.name = "KEYSC",
.start = 0x044b0000,
.end = 0x044b000f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ0_KEY,
.start = 79,
.flags = IORESOURCE_IRQ,
},
};


@ -26,8 +26,30 @@ ENTRY(sh_mobile_standby)
tst #SUSP_SH_SF, r0
bt skip_set_sf
#ifdef CONFIG_CPU_SUBTYPE_SH7724
/* DBSC: put memory in self-refresh mode */
/* SDRAM: disable power down and put in self-refresh mode */
mov.l dben_reg, r4
mov.l dben_data0, r1
mov.l r1, @r4
mov.l dbrfpdn0_reg, r4
mov.l dbrfpdn0_data0, r1
mov.l r1, @r4
mov.l dbcmdcnt_reg, r4
mov.l dbcmdcnt_data0, r1
mov.l r1, @r4
mov.l dbcmdcnt_reg, r4
mov.l dbcmdcnt_data1, r1
mov.l r1, @r4
mov.l dbrfpdn0_reg, r4
mov.l dbrfpdn0_data1, r1
mov.l r1, @r4
#else
/* SBSC: disable power down and put in self-refresh mode */
mov.l 1f, r4
mov.l 2f, r1
mov.l @r4, r2
@ -35,6 +57,7 @@ ENTRY(sh_mobile_standby)
mov.l 3f, r3
and r3, r2
mov.l r2, @r4
#endif
skip_set_sf:
tst #SUSP_SH_SLEEP, r0
@ -84,7 +107,36 @@ done_sleep:
tst #SUSP_SH_SF, r0
bt skip_restore_sf
/* SDRAM: set auto-refresh mode */
#ifdef CONFIG_CPU_SUBTYPE_SH7724
/* DBSC: put memory in auto-refresh mode */
mov.l dbrfpdn0_reg, r4
mov.l dbrfpdn0_data0, r1
mov.l r1, @r4
/* sleep 140 ns */
nop
nop
nop
nop
mov.l dbcmdcnt_reg, r4
mov.l dbcmdcnt_data0, r1
mov.l r1, @r4
mov.l dbcmdcnt_reg, r4
mov.l dbcmdcnt_data1, r1
mov.l r1, @r4
mov.l dben_reg, r4
mov.l dben_data1, r1
mov.l r1, @r4
mov.l dbrfpdn0_reg, r4
mov.l dbrfpdn0_data2, r1
mov.l r1, @r4
#else
/* SBSC: set auto-refresh mode */
mov.l 1f, r4
mov.l @r4, r2
mov.l 4f, r3
@ -98,15 +150,29 @@ done_sleep:
add r4, r3
or r2, r3
mov.l r3, @r1
#endif
skip_restore_sf:
rts
nop
.balign 4
#ifdef CONFIG_CPU_SUBTYPE_SH7724
dben_reg: .long 0xfd000010 /* DBEN */
dben_data0: .long 0
dben_data1: .long 1
dbrfpdn0_reg: .long 0xfd000040 /* DBRFPDN0 */
dbrfpdn0_data0: .long 0
dbrfpdn0_data1: .long 1
dbrfpdn0_data2: .long 0x00010000
dbcmdcnt_reg: .long 0xfd000014 /* DBCMDCNT */
dbcmdcnt_data0: .long 2
dbcmdcnt_data1: .long 4
#else
1: .long 0xfe400008 /* SDCR0 */
2: .long 0x00000400
3: .long 0xffff7fff
4: .long 0xfffffbff
#endif
5: .long 0xa4150020 /* STBCR */
6: .long 0xfe40001c /* RTCOR */
7: .long 0xfe400018 /* RTCNT */


@ -40,6 +40,7 @@ struct sh_cmt_priv {
struct platform_device *pdev;
unsigned long flags;
unsigned long flags_suspend;
unsigned long match_value;
unsigned long next_match_value;
unsigned long max_match_value;
@ -667,11 +668,38 @@ static int __devexit sh_cmt_remove(struct platform_device *pdev)
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static int sh_cmt_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
/* save flag state and stop CMT channel */
p->flags_suspend = p->flags;
sh_cmt_stop(p, p->flags);
return 0;
}
static int sh_cmt_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
/* start CMT channel from saved state */
sh_cmt_start(p, p->flags_suspend);
return 0;
}
static struct dev_pm_ops sh_cmt_dev_pm_ops = {
.suspend = sh_cmt_suspend,
.resume = sh_cmt_resume,
};
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
.remove = __devexit_p(sh_cmt_remove),
.driver = {
.name = "sh_cmt",
.pm = &sh_cmt_dev_pm_ops,
}
};


@ -4364,6 +4364,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
if (mode == 1)
set_disk_ro(disk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);


@ -235,6 +235,7 @@ enum vortex_chips {
CH_3C900B_FL,
CH_3C905_1,
CH_3C905_2,
CH_3C905B_TX,
CH_3C905B_1,
CH_3C905B_2,
@ -307,6 +308,8 @@ static struct vortex_chip_info {
PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
{"3c905 Boomerang 100baseT4",
PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
{"3C905B-TX Fast Etherlink XL PCI",
PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
{"3c905B Cyclone 100baseTx",
PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
@ -389,6 +392,7 @@ static struct pci_device_id vortex_pci_tbl[] = {
{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
{ 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },


@ -515,7 +515,7 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
dma_addr_t mapping;
struct sk_buff *skb, *new_skb;
struct cp_desc *desc;
unsigned buflen;
const unsigned buflen = cp->rx_buf_sz;
skb = cp->rx_skb[rx_tail];
BUG_ON(!skb);
@ -549,8 +549,7 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
pr_debug("%s: rx slot %d status 0x%x len %d\n",
dev->name, rx_tail, status, len);
buflen = cp->rx_buf_sz + NET_IP_ALIGN;
new_skb = netdev_alloc_skb(dev, buflen);
new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
if (!new_skb) {
dev->stats.rx_dropped++;
goto rx_next;


@ -232,11 +232,11 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
strncpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
strncpy(drvinfo->version, atl1c_driver_version,
strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, atl1c_driver_version,
sizeof(drvinfo->version));
strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;


@ -3378,11 +3378,11 @@ static void atl1_get_drvinfo(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
sizeof(drvinfo->version));
strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}


@ -952,9 +952,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
int rc = NETDEV_TX_OK;
dma_addr_t mapping;
u32 len, entry, ctrl;
unsigned long flags;
len = skb->len;
spin_lock_irq(&bp->lock);
spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
@ -1027,7 +1028,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
out_unlock:
spin_unlock_irq(&bp->lock);
spin_unlock_irqrestore(&bp->lock, flags);
return rc;


@ -399,9 +399,11 @@ static int bnx2_unregister_cnic(struct net_device *dev)
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
mutex_lock(&bp->cnic_lock);
cp->drv_state = 0;
bnapi->cnic_present = 0;
rcu_assign_pointer(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_lock);
synchronize_rcu();
return 0;
}
@ -429,13 +431,13 @@ bnx2_cnic_stop(struct bnx2 *bp)
struct cnic_ops *c_ops;
struct cnic_ctl_info info;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
mutex_lock(&bp->cnic_lock);
c_ops = bp->cnic_ops;
if (c_ops) {
info.cmd = CNIC_CTL_STOP_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
}
rcu_read_unlock();
mutex_unlock(&bp->cnic_lock);
}
static void
@ -444,8 +446,8 @@ bnx2_cnic_start(struct bnx2 *bp)
struct cnic_ops *c_ops;
struct cnic_ctl_info info;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
mutex_lock(&bp->cnic_lock);
c_ops = bp->cnic_ops;
if (c_ops) {
if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@ -455,7 +457,7 @@ bnx2_cnic_start(struct bnx2 *bp)
info.cmd = CNIC_CTL_START_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
}
rcu_read_unlock();
mutex_unlock(&bp->cnic_lock);
}
#else
@ -7663,6 +7665,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
spin_lock_init(&bp->phy_lock);
spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
mutex_init(&bp->cnic_lock);
#endif
INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);


@ -6902,6 +6902,7 @@ struct bnx2 {
u32 idle_chk_status_idx;
#ifdef BCM_CNIC
struct mutex cnic_lock;
struct cnic_eth_dev cnic_eth_dev;
#endif


@ -611,11 +611,18 @@ static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
return -EMSGSIZE;
}
static int can_newlink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
return -EOPNOTSUPP;
}
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
.policy = can_policy,
.setup = can_setup,
.newlink = can_newlink,
.changelink = can_changelink,
.fill_info = can_fill_info,
.fill_xstats = can_fill_xstats,


@ -138,6 +138,16 @@ static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
atomic_inc(&ulp_ops->ref_count);
}
static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
struct cnic_local *cp = dev->cnic_priv;
@ -358,6 +368,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
}
read_unlock(&cnic_dev_lock);
atomic_set(&ulp_ops->ref_count, 0);
rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
mutex_unlock(&cnic_lock);
@ -379,6 +390,8 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
int cnic_unregister_driver(int ulp_type)
{
struct cnic_dev *dev;
struct cnic_ulp_ops *ulp_ops;
int i = 0;
if (ulp_type >= MAX_CNIC_ULP_TYPE) {
printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
@ -386,7 +399,8 @@ int cnic_unregister_driver(int ulp_type)
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (!cnic_ulp_tbl[ulp_type]) {
ulp_ops = cnic_ulp_tbl[ulp_type];
if (!ulp_ops) {
printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
"been registered\n", ulp_type);
goto out_unlock;
@ -411,6 +425,14 @@ int cnic_unregister_driver(int ulp_type)
mutex_unlock(&cnic_lock);
synchronize_rcu();
while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
msleep(100);
i++;
}
if (atomic_read(&ulp_ops->ref_count) != 0)
printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
" to zero.\n", dev->netdev->name);
return 0;
out_unlock:
@ -466,6 +488,7 @@ EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
struct cnic_local *cp = dev->cnic_priv;
int i = 0;
if (ulp_type >= MAX_CNIC_ULP_TYPE) {
printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
@ -486,6 +509,15 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
synchronize_rcu();
while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
i < 20) {
msleep(100);
i++;
}
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
" to complete.\n", dev->netdev->name);
return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
@ -1076,18 +1108,23 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
if (cp->cnic_uinfo)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
rcu_read_lock();
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
if (!ulp_ops)
mutex_lock(&cnic_lock);
ulp_ops = cp->ulp_ops[if_type];
if (!ulp_ops) {
mutex_unlock(&cnic_lock);
continue;
}
set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
mutex_unlock(&cnic_lock);
if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
rcu_read_unlock();
}
static void cnic_ulp_start(struct cnic_dev *dev)
@ -1095,18 +1132,23 @@ static void cnic_ulp_start(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
int if_type;
rcu_read_lock();
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
if (!ulp_ops || !ulp_ops->cnic_start)
mutex_lock(&cnic_lock);
ulp_ops = cp->ulp_ops[if_type];
if (!ulp_ops || !ulp_ops->cnic_start) {
mutex_unlock(&cnic_lock);
continue;
}
set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
mutex_unlock(&cnic_lock);
if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
ulp_ops->cnic_start(cp->ulp_handle[if_type]);
clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
rcu_read_unlock();
}
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
@ -1116,22 +1158,18 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
switch (info->cmd) {
case CNIC_CTL_STOP_CMD:
cnic_hold(dev);
mutex_lock(&cnic_lock);
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
mutex_unlock(&cnic_lock);
cnic_put(dev);
break;
case CNIC_CTL_START_CMD:
cnic_hold(dev);
mutex_lock(&cnic_lock);
if (!cnic_start_hw(dev))
cnic_ulp_start(dev);
mutex_unlock(&cnic_lock);
cnic_put(dev);
break;
default:
@ -1145,19 +1183,23 @@ static void cnic_ulp_init(struct cnic_dev *dev)
int i;
struct cnic_local *cp = dev->cnic_priv;
rcu_read_lock();
for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
struct cnic_ulp_ops *ulp_ops;
ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
if (!ulp_ops || !ulp_ops->cnic_init)
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl[i];
if (!ulp_ops || !ulp_ops->cnic_init) {
mutex_unlock(&cnic_lock);
continue;
}
ulp_get(ulp_ops);
mutex_unlock(&cnic_lock);
if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
ulp_ops->cnic_init(dev);
ulp_put(ulp_ops);
}
rcu_read_unlock();
}
static void cnic_ulp_exit(struct cnic_dev *dev)
@ -1165,19 +1207,23 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
int i;
struct cnic_local *cp = dev->cnic_priv;
rcu_read_lock();
for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
struct cnic_ulp_ops *ulp_ops;
ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
if (!ulp_ops || !ulp_ops->cnic_exit)
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl[i];
if (!ulp_ops || !ulp_ops->cnic_exit) {
mutex_unlock(&cnic_lock);
continue;
}
ulp_get(ulp_ops);
mutex_unlock(&cnic_lock);
if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
ulp_ops->cnic_exit(dev);
ulp_put(ulp_ops);
}
rcu_read_unlock();
}
static int cnic_cm_offload_pg(struct cnic_sock *csk)
@ -2393,6 +2439,37 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return 0;
}
static int cnic_register_netdev(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err;
if (!ethdev)
return -ENODEV;
if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
return 0;
err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
if (err)
printk(KERN_ERR PFX "%s: register_cnic failed\n",
dev->netdev->name);
return err;
}
static void cnic_unregister_netdev(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
if (!ethdev)
return;
ethdev->drv_unregister_cnic(dev->netdev);
}
static int cnic_start_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@ -2402,13 +2479,6 @@ static int cnic_start_hw(struct cnic_dev *dev)
if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EALREADY;
err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
if (err) {
printk(KERN_ERR PFX "%s: register_cnic failed\n",
dev->netdev->name);
goto err2;
}
dev->regview = ethdev->io_base;
cp->chip_id = ethdev->chip_id;
pci_dev_get(dev->pcidev);
@ -2438,18 +2508,13 @@ static int cnic_start_hw(struct cnic_dev *dev)
return 0;
err1:
ethdev->drv_unregister_cnic(dev->netdev);
cp->free_resc(dev);
pci_dev_put(dev->pcidev);
err2:
return err;
}
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
cnic_disable_bnx2_int_sync(dev);
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
@ -2461,8 +2526,6 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
cnic_setup_5709_context(dev, 0);
cnic_free_irq(dev);
ethdev->drv_unregister_cnic(dev->netdev);
cnic_free_resc(dev);
}
@ -2543,7 +2606,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
probe = symbol_get(bnx2_cnic_probe);
if (probe) {
ethdev = (*probe)(dev);
symbol_put_addr(probe);
symbol_put(bnx2_cnic_probe);
}
if (!ethdev)
return NULL;
@ -2646,10 +2709,12 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
else if (event == NETDEV_UNREGISTER)
cnic_ulp_exit(dev);
else if (event == NETDEV_UP) {
mutex_lock(&cnic_lock);
if (cnic_register_netdev(dev) != 0) {
cnic_put(dev);
goto done;
}
if (!cnic_start_hw(dev))
cnic_ulp_start(dev);
mutex_unlock(&cnic_lock);
}
rcu_read_lock();
@ -2668,10 +2733,9 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
rcu_read_unlock();
if (event == NETDEV_GOING_DOWN) {
mutex_lock(&cnic_lock);
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
mutex_unlock(&cnic_lock);
cnic_unregister_netdev(dev);
} else if (event == NETDEV_UNREGISTER) {
write_lock(&cnic_dev_lock);
list_del_init(&dev->list);
@ -2703,6 +2767,7 @@ static void cnic_release(void)
}
cnic_ulp_exit(dev);
cnic_unregister_netdev(dev);
list_del_init(&dev->list);
cnic_free_dev(dev);
}


@ -176,6 +176,7 @@ struct cnic_local {
unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
#define ULP_F_INIT 0
#define ULP_F_START 1
#define ULP_F_CALL_PENDING 2
struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
/* protected by ulp_lock */


@ -290,6 +290,7 @@ struct cnic_ulp_ops {
void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
struct module *owner;
atomic_t ref_count;
};
extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);


@ -338,10 +338,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
union ich8_hws_flash_status hsfsts;
u32 gfpreg;
u32 sector_base_addr;
u32 sector_end_addr;
u32 gfpreg, sector_base_addr, sector_end_addr;
u16 i;
/* Can't read flash registers if the register set isn't mapped. */
@ -375,20 +372,6 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
/*
* Make sure the flash bank size does not overwrite the 4k
* sector ranges. We may have 64k allotted to us but we only care
* about the first 2 4k sectors. Therefore, if we have anything less
* than 64k set in the HSFSTS register, we will reduce the bank size
* down to 4k and let the rest remain unused. If berasesz == 3, then
* we are working in 64k mode. Otherwise we are not.
*/
if (nvm->flash_bank_size > E1000_ICH8_SHADOW_RAM_WORDS) {
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.berasesz != 3)
nvm->flash_bank_size = E1000_ICH8_SHADOW_RAM_WORDS;
}
nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
/* Clear shadow ram */
@ -594,8 +577,8 @@ static DEFINE_MUTEX(nvm_mutex);
**/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
u32 timeout = PHY_CFG_TIMEOUT;
u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
s32 ret_val = 0;
might_sleep();
@ -603,28 +586,46 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL);
if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) {
extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
break;
}
mdelay(1);
timeout--;
}
if (!timeout) {
hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
mutex_unlock(&nvm_mutex);
return -E1000_ERR_CONFIG;
hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n");
ret_val = -E1000_ERR_CONFIG;
goto out;
}
return 0;
timeout = PHY_CFG_TIMEOUT * 2;
extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
break;
mdelay(1);
timeout--;
}
if (!timeout) {
hw_dbg(hw, "Failed to acquire the semaphore.\n");
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
ret_val = -E1000_ERR_CONFIG;
goto out;
}
out:
if (ret_val)
mutex_unlock(&nvm_mutex);
return ret_val;
}
/**
@ -1306,7 +1307,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 act_offset;
s32 ret_val;
s32 ret_val = 0;
u32 bank = 0;
u16 i, word;
@ -1321,12 +1322,15 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
goto out;
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
if (ret_val)
goto release;
if (ret_val) {
hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
bank = 0;
}
act_offset = (bank) ? nvm->flash_bank_size : 0;
act_offset += offset;
ret_val = 0;
for (i = 0; i < words; i++) {
if ((dev_spec->shadow_ram) &&
(dev_spec->shadow_ram[offset+i].modified)) {
@ -1341,7 +1345,6 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
}
}
release:
e1000_release_swflag_ich8lan(hw);
out:
@ -1592,7 +1595,6 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
{
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
s32 ret_val;
u16 i;
if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
@ -1601,17 +1603,11 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_NVM;
}
ret_val = e1000_acquire_swflag_ich8lan(hw);
if (ret_val)
return ret_val;
for (i = 0; i < words; i++) {
dev_spec->shadow_ram[offset+i].modified = 1;
dev_spec->shadow_ram[offset+i].value = data[i];
}
e1000_release_swflag_ich8lan(hw);
return 0;
}
@ -1652,8 +1648,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
if (ret_val) {
e1000_release_swflag_ich8lan(hw);
goto out;
hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
bank = 0;
}
if (bank == 0) {
@ -2039,12 +2035,8 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
iteration = 1;
break;
case 2:
if (hw->mac.type == e1000_ich9lan) {
sector_size = ICH_FLASH_SEG_SIZE_8K;
iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K;
} else {
return -E1000_ERR_NVM;
}
iteration = 1;
break;
case 3:
sector_size = ICH_FLASH_SEG_SIZE_64K;
@ -2056,7 +2048,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Start with the base address, then add the sector offset. */
flash_linear_addr = hw->nvm.flash_base_addr;
flash_linear_addr += (bank) ? (sector_size * iteration) : 0;
flash_linear_addr += (bank) ? flash_bank_size : 0;
for (j = 0; j < iteration ; j++) {
do {


@ -4538,8 +4538,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);
if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
!(hw->mac.ops.check_mng_mode(hw))) {
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
if (retval)
@ -4557,7 +4556,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = !!wufc;
/* make sure adapter isn't asleep if manageability is enabled */
if (adapter->flags & FLAG_MNG_PT_ENABLED)
if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
(hw->mac.ops.check_mng_mode(hw)))
*enable_wake = true;
if (adapter->hw.phy.type == e1000_phy_igp_3)
@ -4670,14 +4670,6 @@ static int e1000_resume(struct pci_dev *pdev)
return err;
}
/* AER (Advanced Error Reporting) hooks */
err = pci_enable_pcie_error_reporting(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
"0x%x\n", err);
/* non-fatal, continue */
}
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@ -4990,6 +4982,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
goto err_pci_reg;
/* AER (Advanced Error Reporting) hooks */
err = pci_enable_pcie_error_reporting(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
"0x%x\n", err);
/* non-fatal, continue */
}
pci_set_master(pdev);
/* PCI config space info */
err = pci_save_state(pdev);


@ -285,6 +285,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *bdp;
void *bufaddr;
unsigned short status;
unsigned long flags;
@ -312,7 +313,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer */
bdp->cbd_bufaddr = __pa(skb->data);
bufaddr = skb->data;
bdp->cbd_datlen = skb->len;
/*
@ -320,11 +321,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
* 4-byte boundaries. Use bounce buffers to copy data
* and get it aligned. Ugh.
*/
if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
bufaddr = fep->tx_bounce[index];
}
/* Save skb pointer */
@ -336,7 +337,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Push the data cache so the CPM does not get stale memory
* data.
*/
bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
/* Send it on its way. Tell FEC it's ready, interrupt when done,


@ -936,6 +936,7 @@ int startup_gfar(struct net_device *dev)
struct gfar __iomem *regs = priv->regs;
int err = 0;
u32 rctrl = 0;
u32 tctrl = 0;
u32 attrs = 0;
gfar_write(&regs->imask, IMASK_INIT_CLEAR);
@ -1111,11 +1112,19 @@ int startup_gfar(struct net_device *dev)
rctrl |= RCTRL_PADDING(priv->padding);
}
/* keep vlan related bits if it's enabled */
if (priv->vlgrp) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
tctrl |= TCTRL_VLINS;
}
/* Init rctrl based on our settings */
gfar_write(&priv->regs->rctrl, rctrl);
if (dev->features & NETIF_F_IP_CSUM)
gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
tctrl |= TCTRL_INIT_CSUM;
gfar_write(&priv->regs->tctrl, tctrl);
/* Set the extraction length and index */
attrs = ATTRELI_EL(priv->rx_stash_size) |
@ -1450,7 +1459,6 @@ static void gfar_vlan_rx_register(struct net_device *dev,
/* Enable VLAN tag extraction */
tempval = gfar_read(&priv->regs->rctrl);
tempval |= RCTRL_VLEX;
tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
gfar_write(&priv->regs->rctrl, tempval);
} else {


@ -115,7 +115,7 @@ static int __init w83977af_init(void)
IRDA_DEBUG(0, "%s()\n", __func__ );
for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
return 0;
}


@ -136,6 +136,8 @@ struct ixgbe_ring {
u8 queue_index; /* needed for multiqueue queue management */
#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
u8 flags; /* per ring feature flags */
u16 head;
u16 tail;


@ -1948,6 +1948,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
if (ec->tx_max_coalesced_frames_irq)
@ -1982,8 +1983,12 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->itr_setting = 0;
}
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
/* MSI/MSIx Interrupt Mode */
if (adapter->flags &
(IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i];
if (q_vector->txr_count && !q_vector->rxr_count)
/* tx vector gets half the rate */
q_vector->eitr = (adapter->eitr_param >> 1);
@ -1992,6 +1997,12 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
q_vector->eitr = adapter->eitr_param;
ixgbe_write_eitr(q_vector);
}
/* Legacy Interrupt Mode */
} else {
q_vector = adapter->q_vector[0];
q_vector->eitr = adapter->eitr_param;
ixgbe_write_eitr(q_vector);
}
return 0;
}


@ -336,7 +336,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
/* return 0 to bypass going to ULD for DDPed data */
if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
rc = 0;
else
else if (ddp->len)
rc = ddp->len;
}


@ -492,12 +492,12 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
skb_record_rx_queue(skb, ring->queue_index);
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
if (adapter->vlgrp && is_vlan && (tag != 0))
if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
else
napi_gro_receive(napi, skb);
} else {
if (adapter->vlgrp && is_vlan && (tag != 0))
if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
else
netif_rx(skb);
@ -585,7 +585,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
if (!bi->page_dma &&
(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
(rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
@ -629,7 +629,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
@ -726,7 +726,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
(*work_done)++;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
@ -798,7 +798,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_ring->stats.packets++;
rx_ring->stats.bytes += skb->len;
} else {
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
@ -1898,46 +1898,19 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
struct ixgbe_ring *rx_ring;
u32 srrctl;
int queue0 = 0;
unsigned long mask;
int index;
struct ixgbe_ring_feature *feature = adapter->ring_feature;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
int dcb_i = feature[RING_F_DCB].indices;
if (dcb_i == 8)
queue0 = index >> 4;
else if (dcb_i == 4)
queue0 = index >> 5;
else
dev_err(&adapter->pdev->dev, "Invalid DCB "
"configuration\n");
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
struct ixgbe_ring_feature *f;
rx_ring = &adapter->rx_ring[queue0];
f = &adapter->ring_feature[RING_F_FCOE];
if ((queue0 == 0) && (index > rx_ring->reg_idx))
queue0 = f->mask + index -
rx_ring->reg_idx - 1;
}
#endif /* IXGBE_FCOE */
} else {
queue0 = index;
}
} else {
index = rx_ring->reg_idx;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
unsigned long mask;
mask = (unsigned long) feature[RING_F_RSS].mask;
queue0 = index & mask;
index = index & mask;
}
rx_ring = &adapter->rx_ring[queue0];
srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@ -1946,7 +1919,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
@ -2002,6 +1975,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
u64 rdba;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_ring *rx_ring;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, j;
@ -2018,11 +1992,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
/* Decide whether to use packet split mode or not */
adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
#endif /* IXGBE_FCOE */
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_buf_len = IXGBE_RX_HDR_SIZE;
@ -2070,29 +2039,35 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
rdba = adapter->rx_ring[i].dma;
j = adapter->rx_ring[i].reg_idx;
rx_ring = &adapter->rx_ring[i];
rdba = rx_ring->dma;
j = rx_ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
adapter->rx_ring[i].head = IXGBE_RDH(j);
adapter->rx_ring[i].tail = IXGBE_RDT(j);
adapter->rx_ring[i].rx_buf_len = rx_buf_len;
rx_ring->head = IXGBE_RDH(j);
rx_ring->tail = IXGBE_RDT(j);
rx_ring->rx_buf_len = rx_buf_len;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(i >= f->mask) && (i < f->mask + f->indices))
adapter->rx_ring[i].rx_buf_len =
if ((i >= f->mask) && (i < f->mask + f->indices)) {
rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
rx_ring->rx_buf_len =
IXGBE_FCOE_JUMBO_FRAME_SIZE;
}
}
#endif /* IXGBE_FCOE */
ixgbe_configure_srrctl(adapter, j);
ixgbe_configure_srrctl(adapter, rx_ring);
}
if (hw->mac.type == ixgbe_mac_82598EB) {
@ -2168,7 +2143,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
/* Enable 82599 HW-RSC */
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i].reg_idx;
rx_ring = &adapter->rx_ring[i];
j = rx_ring->reg_idx;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
rscctrl |= IXGBE_RSCCTL_RSCEN;
/*
@ -2176,7 +2152,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* total size of max desc * buf_len is not greater
* than 65535
*/
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)


@ -506,6 +506,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
if (nr > 0)
skb_frags_rx[nr - 1].size = length -
priv->frag_info[nr - 1].frag_prefix_size;
return nr;


@ -1254,7 +1254,7 @@ struct netxen_adapter {
u8 mc_enabled;
u8 max_mc_count;
u8 rss_supported;
u8 resv2;
u8 link_changed;
u32 resv3;
u8 has_link_events;


@ -184,13 +184,6 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
kfree(recv_ctx->rds_rings);
skip_rds:
if (recv_ctx->sds_rings == NULL)
goto skip_sds;
for(ring = 0; ring < adapter->max_sds_rings; ring++)
recv_ctx->sds_rings[ring].consumer = 0;
skip_sds:
if (adapter->tx_ring == NULL)
return;


@ -94,10 +94,6 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
static struct workqueue_struct *netxen_workq;
#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
static void netxen_watchdog(unsigned long);
static uint32_t crb_cmd_producer[4] = {
@ -171,6 +167,8 @@ netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
{
if (recv_ctx->sds_rings != NULL)
kfree(recv_ctx->sds_rings);
recv_ctx->sds_rings = NULL;
}
static int
@ -192,6 +190,21 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
return 0;
}
static void
netxen_napi_del(struct netxen_adapter *adapter)
{
int ring;
struct nx_host_sds_ring *sds_ring;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
netxen_free_sds_rings(&adapter->recv_ctx);
}
static void
netxen_napi_enable(struct netxen_adapter *adapter)
{
@ -260,7 +273,7 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
change = 0;
shift = NXRD32(adapter, CRB_DMA_SHIFT);
if (shift >= 32)
if (shift > 32)
return 0;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
@ -272,7 +285,7 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
old_mask = pdev->dma_mask;
old_cmask = pdev->dev.coherent_dma_mask;
mask = (1ULL<<(32+shift)) - 1;
mask = DMA_BIT_MASK(32+shift);
err = pci_set_dma_mask(pdev, mask);
if (err)
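The nx_update_dma_mask() hunks above replace the open-coded (1ULL << (32 + shift)) - 1 with DMA_BIT_MASK(32 + shift) and relax the guard from >= 32 to > 32. DMA_BIT_MASK(n) expands to a mask with the low n bits set and special-cases n == 64, where the shift-and-subtract form would shift a 64-bit value by 64 bits (undefined behaviour). A minimal sketch of the helper's use, not the driver's code:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Set a DMA mask of (32 + shift) bits on a PCI device; with shift == 32
 * this yields DMA_BIT_MASK(64) == ~0ULL without any undefined shift. */
static int example_set_dma_mask(struct pci_dev *pdev, unsigned int shift)
{
        return pci_set_dma_mask(pdev, DMA_BIT_MASK(32 + shift));
}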
@@ -880,7 +893,6 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
spin_unlock(&adapter->tx_clean_lock);
del_timer_sync(&adapter->watchdog_timer);
FLUSH_SCHEDULED_WORK();
}
@@ -894,10 +906,12 @@ netxen_nic_attach(struct netxen_adapter *adapter)
struct nx_host_tx_ring *tx_ring;
err = netxen_init_firmware(adapter);
if (err != 0) {
printk(KERN_ERR "Failed to init firmware\n");
return -EIO;
}
if (err)
return err;
err = netxen_napi_add(adapter, netdev);
if (err)
return err;
if (adapter->fw_major < 4)
adapter->max_rds_rings = 3;
@@ -961,6 +975,7 @@ netxen_nic_detach(struct netxen_adapter *adapter)
netxen_free_hw_resources(adapter);
netxen_release_rx_buffers(adapter);
netxen_nic_free_irq(adapter);
netxen_napi_del(adapter);
netxen_free_sw_resources(adapter);
adapter->is_up = 0;
@@ -1105,9 +1120,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->irq = adapter->msix_entries[0].vector;
if (netxen_napi_add(adapter, netdev))
goto err_out_disable_msi;
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &netxen_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -1177,6 +1189,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->tx_timeout_task);
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
netxen_nic_detach(adapter);
}
@@ -1185,7 +1200,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
netxen_free_adapter_offload(adapter);
netxen_teardown_intr(adapter);
netxen_free_sds_rings(&adapter->recv_ctx);
netxen_cleanup_pci_map(adapter);
@@ -1211,6 +1225,9 @@ netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
if (netif_running(netdev))
netxen_nic_down(adapter, netdev);
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->tx_timeout_task);
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
netxen_nic_detach(adapter);
@@ -1549,11 +1566,6 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
"%s: Device temperature %d degrees C exceeds"
" maximum allowed. Hardware has been shut down.\n",
netdev->name, temp_val);
netif_device_detach(netdev);
netxen_nic_down(adapter, netdev);
netxen_nic_detach(adapter);
rv = 1;
} else if (temp_state == NX_TEMP_WARN) {
if (adapter->temp == NX_TEMP_NORMAL) {
@@ -1587,10 +1599,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
if (!adapter->has_link_events)
netxen_nic_set_link_parameters(adapter);
adapter->link_changed = !adapter->has_link_events;
} else if (!adapter->ahw.linkup && linkup) {
printk(KERN_INFO "%s: %s NIC Link is up\n",
netxen_nic_driver_name, netdev->name);
@@ -1599,9 +1608,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
if (!adapter->has_link_events)
netxen_nic_set_link_parameters(adapter);
adapter->link_changed = !adapter->has_link_events;
}
}
@@ -1628,11 +1635,36 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
netxen_advert_link_change(adapter, linkup);
}
static void netxen_nic_thermal_shutdown(struct netxen_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
netxen_nic_down(adapter, netdev);
netxen_nic_detach(adapter);
}
static void netxen_watchdog(unsigned long v)
{
struct netxen_adapter *adapter = (struct netxen_adapter *)v;
SCHEDULE_WORK(&adapter->watchdog_task);
if (netxen_nic_check_temp(adapter))
goto do_sched;
if (!adapter->has_link_events) {
netxen_nic_handle_phy_intr(adapter);
if (adapter->link_changed)
goto do_sched;
}
if (netif_running(adapter->netdev))
mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
return;
do_sched:
schedule_work(&adapter->watchdog_task);
}
void netxen_watchdog_task(struct work_struct *work)
@@ -1640,11 +1672,13 @@ void netxen_watchdog_task(struct work_struct *work)
struct netxen_adapter *adapter =
container_of(work, struct netxen_adapter, watchdog_task);
if (netxen_nic_check_temp(adapter))
if (adapter->temp == NX_TEMP_PANIC) {
netxen_nic_thermal_shutdown(adapter);
return;
}
if (!adapter->has_link_events)
netxen_nic_handle_phy_intr(adapter);
if (adapter->link_changed)
netxen_nic_set_link_parameters(adapter);
if (netif_running(adapter->netdev))
mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
@@ -1652,9 +1686,8 @@ void netxen_watchdog_task(struct work_struct *work)
static void netxen_tx_timeout(struct net_device *netdev)
{
struct netxen_adapter *adapter = (struct netxen_adapter *)
netdev_priv(netdev);
SCHEDULE_WORK(&adapter->tx_timeout_task);
struct netxen_adapter *adapter = netdev_priv(netdev);
schedule_work(&adapter->tx_timeout_task);
}
static void netxen_tx_timeout_task(struct work_struct *work)
@@ -1811,9 +1844,6 @@ static int __init netxen_init_module(void)
{
printk(KERN_INFO "%s\n", netxen_nic_driver_string);
if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
return -ENOMEM;
return pci_register_driver(&netxen_driver);
}
@@ -1822,7 +1852,6 @@ module_init(netxen_init_module);
static void __exit netxen_exit_module(void)
{
pci_unregister_driver(&netxen_driver);
destroy_workqueue(netxen_workq);
}
module_exit(netxen_exit_module);
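Taken together, the netxen hunks above retire the driver-private workqueue (netxen_workq, SCHEDULE_WORK, FLUSH_SCHEDULED_WORK) in favour of schedule_work() plus cancel_work_sync(), and slim the watchdog timer down to cheap checks that only kick the work item when something actually changed. A rough sketch of that timer-plus-work split, with illustrative structure and function names:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_adapter {
        struct timer_list watchdog_timer;
        struct work_struct watchdog_task;
        bool needs_attention;           /* e.g. over-temperature or link change */
};

/* Timer callback: softirq context, must not sleep; defer slow handling to
 * the work item on the shared system workqueue. */
static void example_watchdog(unsigned long data)
{
        struct example_adapter *adapter = (struct example_adapter *)data;

        if (adapter->needs_attention) {
                schedule_work(&adapter->watchdog_task);
                return;
        }
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

/* Work callback: may sleep; re-arms the timer once the slow path is done. */
static void example_watchdog_task(struct work_struct *work)
{
        struct example_adapter *adapter =
                container_of(work, struct example_adapter, watchdog_task);

        /* ... slow handling (link parameters, thermal shutdown, ...) ... */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}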


@@ -1839,7 +1839,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
lp->chip_version = chip_version;
lp->msg_enable = pcnet32_debug;
if ((cards_found >= MAX_UNITS)
|| (options[cards_found] > sizeof(options_mapping)))
|| (options[cards_found] >= sizeof(options_mapping)))
lp->options = PCNET32_PORT_ASEL;
else
lp->options = options_mapping[options[cards_found]];


@@ -652,8 +652,9 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
int entry;
u32 flag;
dma_addr_t mapping;
unsigned long flags;
spin_lock_irq(&tp->lock);
spin_lock_irqsave(&tp->lock, flags);
/* Calculate the next Tx descriptor entry. */
entry = tp->cur_tx % TX_RING_SIZE;
@@ -688,7 +689,7 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Trigger an immediate transmit demand. */
iowrite32(0, tp->base_addr + CSR1);
spin_unlock_irq(&tp->lock);
spin_unlock_irqrestore(&tp->lock, flags);
dev->trans_start = jiffies;
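The tulip_start_xmit() change above, like the matching ucc_geth and via-rhine hunks further down, switches the transmit path from spin_lock_irq()/spin_unlock_irq() to the irqsave variants. The hard-coded variants re-enable interrupts unconditionally on unlock, which is wrong if the caller (netpoll, for instance) already had them disabled; saving and restoring the flags preserves the caller's state. A minimal sketch of the pattern:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_tx_lock);

static void example_queue_tx(void)
{
        unsigned long flags;

        /* Save the caller's interrupt state and restore exactly that state. */
        spin_lock_irqsave(&example_tx_lock, flags);
        /* ... fill the TX descriptor ring ... */
        spin_unlock_irqrestore(&example_tx_lock, flags);
}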


@@ -1048,20 +1048,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return err;
}
static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
static int tun_get_iff(struct net *net, struct tun_struct *tun,
struct ifreq *ifr)
{
struct tun_struct *tun = tun_get(file);
if (!tun)
return -EBADFD;
DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
strcpy(ifr->ifr_name, tun->dev->name);
ifr->ifr_flags = tun_flags(tun);
tun_put(tun);
return 0;
}
@@ -1105,8 +1100,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
return 0;
}
static int tun_chr_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
static long tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
@@ -1128,34 +1123,32 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
(unsigned int __user*)argp);
}
rtnl_lock();
tun = __tun_get(tfile);
if (cmd == TUNSETIFF && !tun) {
int err;
ifr.ifr_name[IFNAMSIZ-1] = '\0';
rtnl_lock();
err = tun_set_iff(tfile->net, file, &ifr);
rtnl_unlock();
ret = tun_set_iff(tfile->net, file, &ifr);
if (err)
return err;
if (ret)
goto unlock;
if (copy_to_user(argp, &ifr, sizeof(ifr)))
return -EFAULT;
return 0;
ret = -EFAULT;
goto unlock;
}
ret = -EBADFD;
if (!tun)
return -EBADFD;
goto unlock;
DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
ret = 0;
switch (cmd) {
case TUNGETIFF:
ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
if (ret)
break;
@@ -1201,7 +1194,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
rtnl_lock();
if (tun->dev->flags & IFF_UP) {
DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
tun->dev->name);
@@ -1211,7 +1203,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
ret = 0;
}
rtnl_unlock();
break;
#ifdef TUN_DEBUG
@@ -1220,9 +1211,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
break;
#endif
case TUNSETOFFLOAD:
rtnl_lock();
ret = set_offload(tun->dev, arg);
rtnl_unlock();
break;
case TUNSETTXFILTER:
@@ -1230,9 +1219,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
break;
rtnl_lock();
ret = update_filter(&tun->txflt, (void __user *)arg);
rtnl_unlock();
break;
case SIOCGIFHWADDR:
@@ -1248,9 +1235,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
DBG(KERN_DEBUG "%s: set hw address: %pM\n",
tun->dev->name, ifr.ifr_hwaddr.sa_data);
rtnl_lock();
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
rtnl_unlock();
break;
case TUNGETSNDBUF:
@@ -1273,6 +1258,9 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
break;
};
unlock:
rtnl_unlock();
if (tun)
tun_put(tun);
return ret;
}
@@ -1361,7 +1349,7 @@ static const struct file_operations tun_fops = {
.write = do_sync_write,
.aio_write = tun_chr_aio_write,
.poll = tun_chr_poll,
.ioctl = tun_chr_ioctl,
.unlocked_ioctl = tun_chr_ioctl,
.open = tun_chr_open,
.release = tun_chr_close,
.fasync = tun_chr_fasync
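The tun.c hunks above convert the character-device ioctl from the legacy .ioctl hook (inode argument, run under the Big Kernel Lock) to .unlocked_ioctl, and make the handler take rtnl_lock() once around the whole command switch, releasing it at the shared unlock: label. A minimal sketch of the unlocked_ioctl shape, with sample names rather than tun's own:

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

/* unlocked_ioctl: no inode parameter, long return value, and the handler is
 * responsible for its own serialization. */
static long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        long ret;

        mutex_lock(&example_lock);
        switch (cmd) {
        default:
                ret = -ENOTTY;
                break;
        }
        mutex_unlock(&example_lock);
        return ret;
}

static const struct file_operations example_fops = {
        .unlocked_ioctl = example_ioctl,
};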


@@ -3111,10 +3111,11 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
u8 __iomem *bd; /* BD pointer */
u32 bd_status;
u8 txQ = 0;
unsigned long flags;
ugeth_vdbg("%s: IN", __func__);
spin_lock_irq(&ugeth->lock);
spin_lock_irqsave(&ugeth->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -3171,7 +3172,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
uccf = ugeth->uccf;
out_be16(uccf->p_utodr, UCC_FAST_TOD);
#endif
spin_unlock_irq(&ugeth->lock);
spin_unlock_irqrestore(&ugeth->lock, flags);
return 0;
}


@@ -250,6 +250,8 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
DEFAULT_GPIO_RESET)
PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,


@@ -1218,6 +1218,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
unsigned entry;
unsigned long flags;
/* Caution: the write order is important here, set the field
with the "ownership" bits last. */
@@ -1261,7 +1262,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
/* lock eth irq */
spin_lock_irq(&rp->lock);
spin_lock_irqsave(&rp->lock, flags);
wmb();
rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
wmb();
@@ -1280,7 +1281,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
spin_unlock_irq(&rp->lock);
spin_unlock_irqrestore(&rp->lock, flags);
if (debug > 4) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",


@@ -1778,7 +1778,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
* mode
*/
if (vptr->rev_id < REV_ID_VT3216_A0) {
if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
else
BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
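The via-velocity fix above replaces a bitwise OR with a bitwise AND in the duplex test: with '|', any non-zero mii_status makes the condition true and the half-duplex branch can never run. A tiny sketch of the corrected test, with an illustrative flag name:

#define EXAMPLE_DUPLEX_FULL 0x0001      /* illustrative bit, not the driver's */

static int example_is_full_duplex(unsigned int mii_status)
{
        /* '&' tests the bit; '|' would be true whenever mii_status != 0. */
        return (mii_status & EXAMPLE_DUPLEX_FULL) != 0;
}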


@@ -1967,13 +1967,14 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
int ret;
mutex_lock(&ar->mutex);
if ((param) && !(queue > __AR9170_NUM_TXQ)) {
if (queue < __AR9170_NUM_TXQ) {
memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
param, sizeof(*param));
ret = ar9170_set_qos(ar);
} else
} else {
ret = -EINVAL;
}
mutex_unlock(&ar->mutex);
return ret;


@@ -598,11 +598,15 @@ static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
err = request_firmware(&aru->init_values, "ar9170-1.fw",
&aru->udev->dev);
if (err) {
dev_err(&aru->udev->dev, "file with init values not found.\n");
return err;
}
err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
if (err) {
release_firmware(aru->init_values);
dev_err(&aru->udev->dev, "file with init values not found.\n");
dev_err(&aru->udev->dev, "firmware file not found.\n");
return err;
}


@@ -6226,7 +6226,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
};
u8 channel;
while (channel_index < IPW_SCAN_CHANNELS) {
while (channel_index < IPW_SCAN_CHANNELS - 1) {
channel =
priv->speed_scan[priv->speed_scan_pos];
if (channel == 0) {


@@ -1,7 +1,6 @@
/* Copyright (C) 2006, Red Hat, Inc. */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/if_arp.h>
@@ -44,21 +43,21 @@ static int get_common_rates(struct lbs_private *priv,
u16 *rates_size)
{
u8 *card_rates = lbs_bg_rates;
size_t num_card_rates = sizeof(lbs_bg_rates);
int ret = 0, i, j;
u8 tmp[(ARRAY_SIZE(lbs_bg_rates) - 1) * (*rates_size - 1)];
u8 tmp[30];
size_t tmp_size = 0;
/* For each rate in card_rates that exists in rate1, copy to tmp */
for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && card_rates[i]; i++) {
for (j = 0; j < *rates_size && rates[j]; j++) {
for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
for (j = 0; rates[j] && (j < *rates_size); j++) {
if (rates[j] == card_rates[i])
tmp[tmp_size++] = card_rates[i];
}
}
lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates,
ARRAY_SIZE(lbs_bg_rates));
lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
@@ -70,7 +69,10 @@ static int get_common_rates(struct lbs_private *priv,
lbs_pr_alert("Previously set fixed data rate %#x isn't "
"compatible with the network.\n", priv->cur_rate);
ret = -1;
goto done;
}
ret = 0;
done:
memset(rates, 0, *rates_size);
*rates_size = min_t(int, tmp_size, *rates_size);
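get_common_rates() above intersects the rates advertised by the network with the card's own rate table into a fixed-size scratch buffer, bounding both loops by the array length and by the 0 terminator. A self-contained sketch of that intersection step (not the libertas code):

#include <linux/types.h>

/* Copy every byte present in both zero-terminated, size-bounded arrays into
 * out[], never writing more than out_len entries; returns the count. */
static size_t example_common_rates(const u8 *a, size_t a_len,
                                   const u8 *b, size_t b_len,
                                   u8 *out, size_t out_len)
{
        size_t i, j, n = 0;

        for (i = 0; i < a_len && a[i]; i++)
                for (j = 0; j < b_len && b[j]; j++)
                        if (a[i] == b[j] && n < out_len)
                                out[n++] = a[i];
        return n;
}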
@@ -320,7 +322,7 @@ static int lbs_associate(struct lbs_private *priv,
rates = (struct mrvl_ie_rates_param_set *) pos;
rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
memcpy(&rates->rates, &bss->rates, MAX_RATES);
tmplen = min_t(u16, ARRAY_SIZE(rates->rates), MAX_RATES);
tmplen = MAX_RATES;
if (get_common_rates(priv, rates->rates, &tmplen)) {
ret = -1;
goto done;
@@ -596,7 +598,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
/* Copy Data rates from the rates recorded in scan response */
memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), MAX_RATES);
ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES);
memcpy(cmd.bss.rates, bss->rates, ratesize);
if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");


@@ -56,8 +56,8 @@ struct rxpd {
u8 bss_type;
/* BSS number */
u8 bss_num;
} bss;
} u;
} __attribute__ ((packed)) bss;
} __attribute__ ((packed)) u;
/* SNR */
u8 snr;


@@ -261,7 +261,7 @@ struct mwl8k_vif {
*/
};
#define MWL8K_VIF(_vif) (struct mwl8k_vif *)(&((_vif)->drv_priv))
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
static const struct ieee80211_channel mwl8k_channels[] = {
{ .center_freq = 2412, .hw_value = 1, },
@@ -1012,6 +1012,8 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
rmb();
skb = rxq->rx_skb[rxq->rx_head];
if (skb == NULL)
break;
rxq->rx_skb[rxq->rx_head] = NULL;
rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS;
@@ -1591,6 +1593,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
timeout = wait_for_completion_timeout(&cmd_wait,
msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
pci_unmap_single(priv->pdev, dma_addr, dma_size,
PCI_DMA_BIDIRECTIONAL);
result = &cmd->result;
if (!timeout) {
spin_lock_irq(&priv->fw_lock);
@@ -1610,8 +1615,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
*result);
}
pci_unmap_single(priv->pdev, dma_addr, dma_size,
PCI_DMA_BIDIRECTIONAL);
return rc;
}
@@ -1654,18 +1657,18 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma);
cmd->num_tx_queues = MWL8K_TX_QUEUES;
cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma);
cmd->num_tx_desc_per_queue = MWL8K_TX_DESCS;
cmd->total_rx_desc = MWL8K_RX_DESCS;
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS);
rc = mwl8k_post_cmd(hw, &cmd->header);
if (!rc) {
SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = cmd->fw_rev;
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
priv->region_code = le16_to_cpu(cmd->region_code);
}
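The mwl8k_cmd_get_hw_spec() hunk above wraps the queue and descriptor counts in cpu_to_le32() and reads fw_rev back through le32_to_cpu(): fields shared with the firmware are fixed little-endian, so they must be converted explicitly on big-endian hosts. A small sketch of the convention, with an illustrative command layout rather than the driver's:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_hw_cmd {
        __le32 num_tx_queues;   /* written by the host */
        __le32 fw_rev;          /* filled in by the firmware */
};

static void example_fill_cmd(struct example_hw_cmd *cmd, u32 nqueues)
{
        /* Convert CPU-order values before handing them to the device. */
        cmd->num_tx_queues = cpu_to_le32(nqueues);
}

static u32 example_read_fw_rev(const struct example_hw_cmd *cmd)
{
        /* ...and convert back when reading device-written fields. */
        return le32_to_cpu(cmd->fw_rev);
}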
@@ -3216,15 +3219,19 @@ static int mwl8k_configure_filter_wt(struct work_struct *wt)
struct dev_addr_list *mclist = worker->mclist;
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mv_vif;
int rc = 0;
if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
rc = mwl8k_cmd_set_pre_scan(hw);
else {
mv_vif = MWL8K_VIF(priv->vif);
rc = mwl8k_cmd_set_post_scan(hw, mv_vif->bssid);
u8 *bssid;
bssid = "\x00\x00\x00\x00\x00\x00";
if (priv->vif != NULL)
bssid = MWL8K_VIF(priv->vif)->bssid;
rc = mwl8k_cmd_set_post_scan(hw, bssid);
}
}
@@ -3726,6 +3733,8 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
ieee80211_stop_queues(hw);
ieee80211_unregister_hw(hw);
/* Remove tx reclaim tasklet */
tasklet_kill(&priv->tx_reclaim_task);
@@ -3739,8 +3748,6 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
for (i = 0; i < MWL8K_TX_QUEUES; i++)
mwl8k_txq_reclaim(hw, i, 1);
ieee80211_unregister_hw(hw);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
mwl8k_txq_deinit(hw, i);


@@ -849,13 +849,15 @@ struct rt2x00_dev {
static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev,
const unsigned int word, u32 *data)
{
*data = rt2x00dev->rf[word];
BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
*data = rt2x00dev->rf[word - 1];
}
static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev,
const unsigned int word, u32 data)
{
rt2x00dev->rf[word] = data;
BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
rt2x00dev->rf[word - 1] = data;
}
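The rt2x00_rf_read()/rt2x00_rf_write() change above treats RF word numbers as 1-based, matching the register numbering, and adds a BUG_ON() bounds check before touching the cached array at word - 1. A minimal sketch of that accessor pattern, with an illustrative cache size:

#include <linux/types.h>
#include <linux/bug.h>

#define EXAMPLE_RF_WORDS 64     /* illustrative; the real bound comes from rf_size */

static u32 example_rf_cache[EXAMPLE_RF_WORDS];

static u32 example_rf_read(unsigned int word)
{
        /* Words are numbered from 1, so index the cache at word - 1. */
        BUG_ON(word < 1 || word > EXAMPLE_RF_WORDS);
        return example_rf_cache[word - 1];
}

static void example_rf_write(unsigned int word, u32 data)
{
        BUG_ON(word < 1 || word > EXAMPLE_RF_WORDS);
        example_rf_cache[word - 1] = data;
}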
/*


@@ -120,6 +120,9 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
for (i = ARRAY_SIZE(cards)-1; i >= 0; i--)
if (z->id == cards[i].id)
break;
if (i < 0)
return -ENODEV;
board = z->resource.start;
ioaddr = board+cards[i].offset;
dev = alloc_ei_netdev();


@@ -527,7 +527,7 @@ config SERIAL_S3C24A0
config SERIAL_S3C6400
tristate "Samsung S3C6400/S3C6410 Serial port support"
depends on SERIAL_SAMSUNG && (CPU_S3C600 || CPU_S3C6410)
depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410)
default y
help
Serial port support for the Samsung S3C6400 and S3C6410
