commit 3e86858133
Merge commit 'v2.6.38-rc4' into perf/core

Merge reason: pick up the latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
@@ -838,7 +838,7 @@ EXPORT_SYMBOL(ep93xx_i2s_release);
 static struct resource ep93xx_ac97_resources[] = {
 {
 .start = EP93XX_AAC_PHYS_BASE,
-.end = EP93XX_AAC_PHYS_BASE + 0xb0 - 1,
+.end = EP93XX_AAC_PHYS_BASE + 0xac - 1,
 .flags = IORESOURCE_MEM,
 },
 {

@@ -180,7 +180,7 @@ static const uint32_t mx25pdk_keymap[] = {
 KEY(3, 3, KEY_POWER),
 };
 
-static const struct matrix_keymap_data mx25pdk_keymap_data __initdata = {
+static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = {
 .keymap = mx25pdk_keymap,
 .keymap_size = ARRAY_SIZE(mx25pdk_keymap),
 };
@@ -304,7 +304,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
 reg &= ~BM_CLKCTRL_##dr##_DIV; \
 reg |= div << BP_CLKCTRL_##dr##_DIV; \
-if (reg | (1 << clk->enable_shift)) { \
+if (reg & (1 << clk->enable_shift)) { \
 pr_err("%s: clock is gated\n", __func__); \
 return -EINVAL; \
 } \

@@ -347,7 +347,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \
 { \
 if (parent != clk->parent) { \
 __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
-HW_CLKCTRL_CLKSEQ_TOG); \
+CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
 clk->parent = parent; \
 } \
 \

@@ -355,12 +355,12 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
 } else { \
 reg &= ~BM_CLKCTRL_##dr##_DIV; \
 reg |= div << BP_CLKCTRL_##dr##_DIV; \
-if (reg | (1 << clk->enable_shift)) { \
+if (reg & (1 << clk->enable_shift)) { \
 pr_err("%s: clock is gated\n", __func__); \
 return -EINVAL; \
 } \
 } \
-__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU); \
+__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
 \
 for (i = 10000; i; i--) \
 if (!(__raw_readl(CLKCTRL_BASE_ADDR + \

@@ -483,7 +483,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \
 { \
 if (parent != clk->parent) { \
 __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
-HW_CLKCTRL_CLKSEQ_TOG); \
+CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
 clk->parent = parent; \
 } \
 \
@@ -609,7 +609,6 @@ static struct clk_lookup lookups[] = {
 _REGISTER_CLOCK("duart", NULL, uart_clk)
 _REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
 _REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
-_REGISTER_CLOCK("fec.0", NULL, fec_clk)
 _REGISTER_CLOCK("rtc", NULL, rtc_clk)
 _REGISTER_CLOCK("pll2", NULL, pll2_clk)
 _REGISTER_CLOCK(NULL, "hclk", hbus_clk)

@@ -57,7 +57,6 @@ static void __clk_disable(struct clk *clk)
 if (clk->disable)
 clk->disable(clk);
 __clk_disable(clk->parent);
-__clk_disable(clk->secondary);
 }
 }
 

@@ -68,7 +67,6 @@ static int __clk_enable(struct clk *clk)
 
 if (clk->usecount++ == 0) {
 __clk_enable(clk->parent);
-__clk_enable(clk->secondary);
 
 if (clk->enable)
 clk->enable(clk);

@@ -139,6 +139,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq);
 u32 gpio_irq_no_base = port->virtual_irq_start;
 
+desc->irq_data.chip->irq_ack(&desc->irq_data);
+
 irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) &
 __raw_readl(port->base + PINCTRL_IRQEN(port->id));
 
@@ -29,8 +29,6 @@ struct clk {
 int id;
 /* Source clock this clk depends on */
 struct clk *parent;
-/* Secondary clock to enable/disable with this clock */
-struct clk *secondary;
 /* Reference count of clock enable/disable */
 __s8 usecount;
 /* Register bit position for clock's enable/disable control. */
@@ -37,7 +37,7 @@ int omap_lcd_dma_running(void)
 * On OMAP1510, internal LCD controller will start the transfer
 * when it gets enabled, so assume DMA running if LCD enabled.
 */
-if (cpu_is_omap1510())
+if (cpu_is_omap15xx())
 if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN)
 return 1;
 

@@ -95,7 +95,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
 
 void omap_set_lcd_dma_b1_rotation(int rotate)
 {
-if (cpu_is_omap1510()) {
+if (cpu_is_omap15xx()) {
 printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
 BUG();
 return;

@@ -106,7 +106,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
 
 void omap_set_lcd_dma_b1_mirror(int mirror)
 {
-if (cpu_is_omap1510()) {
+if (cpu_is_omap15xx()) {
 printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
 BUG();
 }

@@ -116,7 +116,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
 
 void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
 {
-if (cpu_is_omap1510()) {
+if (cpu_is_omap15xx()) {
 printk(KERN_ERR "DMA virtual resulotion is not supported "
 "in 1510 mode\n");
 BUG();

@@ -127,7 +127,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
 
 void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
 {
-if (cpu_is_omap1510()) {
+if (cpu_is_omap15xx()) {
 printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
 BUG();
 }

@@ -177,7 +177,7 @@ static void set_b1_regs(void)
 bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
 /* 1510 DMA requires the bottom address to be 2 more
 * than the actual last memory access location. */
-if (cpu_is_omap1510() &&
+if (cpu_is_omap15xx() &&
 lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
 bottom += 2;
 ei = PIXSTEP(0, 0, 1, 0);

@@ -241,7 +241,7 @@ static void set_b1_regs(void)
 return; /* Suppress warning about uninitialized vars */
 }
 
-if (cpu_is_omap1510()) {
+if (cpu_is_omap15xx()) {
 omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
 omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
 omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);

@@ -343,7 +343,7 @@ void omap_free_lcd_dma(void)
 BUG();
 return;
 }
-if (!cpu_is_omap1510())
+if (!cpu_is_omap15xx())
 omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
 OMAP1610_DMA_LCD_CCR);
 lcd_dma.reserved = 0;

@@ -360,7 +360,7 @@ void omap_enable_lcd_dma(void)
 * connected. Otherwise the OMAP internal controller will
 * start the transfer when it gets enabled.
 */
-if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
 return;
 
 w = omap_readw(OMAP1610_DMA_LCD_CTRL);

@@ -378,14 +378,14 @@ EXPORT_SYMBOL(omap_enable_lcd_dma);
 void omap_setup_lcd_dma(void)
 {
 BUG_ON(lcd_dma.active);
-if (!cpu_is_omap1510()) {
+if (!cpu_is_omap15xx()) {
 /* Set some reasonable defaults */
 omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
 omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
 omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
 }
 set_b1_regs();
-if (!cpu_is_omap1510()) {
+if (!cpu_is_omap15xx()) {
 u16 w;
 
 w = omap_readw(OMAP1610_DMA_LCD_CCR);

@@ -407,7 +407,7 @@ void omap_stop_lcd_dma(void)
 u16 w;
 
 lcd_dma.active = 0;
-if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
 return;
 
 w = omap_readw(OMAP1610_DMA_LCD_CCR);
@@ -44,7 +44,6 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/io.h>
-#include <linux/sched.h>
 
 #include <asm/system.h>
 #include <mach/hardware.h>
@@ -115,9 +115,6 @@ static struct omap2_hsmmc_info mmc[] = {
 
 static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev)
 {
-twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1);
-twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0);
-
 if (gpio_is_valid(dssdev->reset_gpio))
 gpio_set_value_cansleep(dssdev->reset_gpio, 1);
 return 0;

@@ -247,6 +244,8 @@ static struct gpio_led gpio_leds[];
 static int devkit8000_twl_gpio_setup(struct device *dev,
 unsigned gpio, unsigned ngpio)
 {
+int ret;
+
 omap_mux_init_gpio(29, OMAP_PIN_INPUT);
 /* gpio + 0 is "mmc0_cd" (input/IRQ) */
 mmc[0].gpio_cd = gpio + 0;

@@ -255,17 +254,23 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
 /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
 
-/* gpio + 1 is "LCD_PWREN" (out, active high) */
-devkit8000_lcd_device.reset_gpio = gpio + 1;
-gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN");
-/* Disable until needed */
-gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0);
+/* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
+devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0;
+ret = gpio_request_one(devkit8000_lcd_device.reset_gpio,
+GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN");
+if (ret < 0) {
+devkit8000_lcd_device.reset_gpio = -EINVAL;
+printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n");
+}
 
 /* gpio + 7 is "DVI_PD" (out, active low) */
 devkit8000_dvi_device.reset_gpio = gpio + 7;
-gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown");
-/* Disable until needed */
-gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0);
+ret = gpio_request_one(devkit8000_dvi_device.reset_gpio,
+GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown");
+if (ret < 0) {
+devkit8000_dvi_device.reset_gpio = -EINVAL;
+printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n");
+}
 
 return 0;
 }
@@ -409,8 +409,6 @@ static void __init omap4_panda_init(void)
 platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
 omap_serial_init();
 omap4_twl6030_hsmmc_init(mmc);
-/* OMAP4 Panda uses internal transceiver so register nop transceiver */
-usb_nop_xceiv_register();
 omap4_ehci_init();
 usb_musb_init(&musb_board_data);
 }
@@ -40,9 +40,6 @@ static struct regulator_consumer_supply rm680_vemmc_consumers[] = {
 static struct regulator_init_data rm680_vemmc = {
 .constraints = {
 .name = "rm680_vemmc",
-.min_uV = 2900000,
-.max_uV = 2900000,
-.apply_uV = 1,
 .valid_modes_mask = REGULATOR_MODE_NORMAL
 | REGULATOR_MODE_STANDBY,
 .valid_ops_mask = REGULATOR_CHANGE_STATUS
@@ -1000,6 +1000,7 @@ int __init omap_mux_init(const char *name, u32 flags,
 if (!partition->base) {
 pr_err("%s: Could not ioremap mux partition at 0x%08x\n",
 __func__, partition->phys);
+kfree(partition);
 return -ENODEV;
 }
 
@@ -168,9 +168,10 @@ static void omap3_core_restore_context(void)
 * once during boot sequence, but this works as we are not using secure
 * services.
 */
-static void omap3_save_secure_ram_context(u32 target_mpu_state)
+static void omap3_save_secure_ram_context(void)
 {
 u32 ret;
+int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
 
 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
 /*

@@ -181,7 +182,7 @@ static void omap3_save_secure_ram_context(u32 target_mpu_state)
 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
 ret = _omap_save_secure_sram((u32 *)
 __pa(omap3_secure_ram_storage));
-pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
+pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
 /* Following is for error tracking, it should not happen */
 if (ret) {
 printk(KERN_ERR "save_secure_sram() returns %08x\n",

@@ -1094,7 +1095,7 @@ static int __init omap3_pm_init(void)
 local_fiq_disable();
 
 omap_dma_global_context_save();
-omap3_save_secure_ram_context(PWRDM_POWER_ON);
+omap3_save_secure_ram_context();
 omap_dma_global_context_restore();
 
 local_irq_enable();
@@ -780,8 +780,7 @@ static int omap_sr_autocomp_show(void *data, u64 *val)
 struct omap_sr *sr_info = (struct omap_sr *) data;
 
 if (!sr_info) {
-pr_warning("%s: omap_sr struct for sr_%s not found\n",
-__func__, sr_info->voltdm->name);
+pr_warning("%s: omap_sr struct not found\n", __func__);
 return -EINVAL;
 }
 

@@ -795,8 +794,7 @@ static int omap_sr_autocomp_store(void *data, u64 val)
 struct omap_sr *sr_info = (struct omap_sr *) data;
 
 if (!sr_info) {
-pr_warning("%s: omap_sr struct for sr_%s not found\n",
-__func__, sr_info->voltdm->name);
+pr_warning("%s: omap_sr struct not found\n", __func__);
 return -EINVAL;
 }
 

@@ -834,7 +832,8 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 
 if (!pdata) {
 dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
-return -EINVAL;
+ret = -EINVAL;
+goto err_free_devinfo;
 }
 
 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

@@ -966,7 +965,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev)
 }
 
 sr_info = _sr_lookup(pdata->voltdm);
-if (!sr_info) {
+if (IS_ERR(sr_info)) {
 dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
 __func__);
 return -EINVAL;
@@ -471,6 +471,7 @@ static void __init vdd_debugfs_init(struct omap_vdd_info *vdd)
 strcat(name, vdd->voltdm.name);
 
 vdd->debug_dir = debugfs_create_dir(name, voltage_dir);
+kfree(name);
 if (IS_ERR(vdd->debug_dir)) {
 pr_warning("%s: Unable to create debugfs directory for"
 " vdd_%s\n", __func__, vdd->voltdm.name);
@@ -95,6 +95,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
 case MACH_TYPE_MX35_3DS:
 case MACH_TYPE_PCM043:
 case MACH_TYPE_LILLY1131:
+case MACH_TYPE_VPR200:
 uart_base = MX3X_UART1_BASE_ADDR;
 break;
 case MACH_TYPE_MAGX_ZN5:

@@ -102,6 +103,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
 break;
 case MACH_TYPE_MX51_BABBAGE:
 case MACH_TYPE_EUKREA_CPUIMX51SD:
+case MACH_TYPE_MX51_3DS:
 uart_base = MX51_UART1_BASE_ADDR;
 break;
 case MACH_TYPE_MX50_RDP:
@@ -12,7 +12,7 @@
 #
 # http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Sun Dec 12 23:24:27 2010
+# Last update: Mon Feb 7 08:59:27 2011
 #
 # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
 #

@@ -2240,7 +2240,7 @@ arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250
 vs_v210 MACH_VS_V210 VS_V210 2252
 vs_v212 MACH_VS_V212 VS_V212 2253
 hmt MACH_HMT HMT 2254
-suen3 MACH_SUEN3 SUEN3 2255
+km_kirkwood MACH_KM_KIRKWOOD KM_KIRKWOOD 2255
 vesper MACH_VESPER VESPER 2256
 str9 MACH_STR9 STR9 2257
 omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258

@@ -2987,7 +2987,7 @@ pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
 ea20 MACH_EA20 EA20 3002
 awm2 MACH_AWM2 AWM2 3003
 ti8148evm MACH_TI8148EVM TI8148EVM 3004
-tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005
+seaboard MACH_SEABOARD SEABOARD 3005
 linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
 tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
 rubys MACH_RUBYS RUBYS 3008

@@ -3190,7 +3190,7 @@ synergy MACH_SYNERGY SYNERGY 3205
 ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206
 wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207
 punica MACH_PUNICA PUNICA 3208
-sbc_nt250 MACH_SBC_NT250 SBC_NT250 3209
+trimslice MACH_TRIMSLICE TRIMSLICE 3209
 mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210
 mackerel MACH_MACKEREL MACKEREL 3211
 fa9x27 MACH_FA9X27 FA9X27 3213
@@ -3219,3 +3219,100 @@ pivicc MACH_PIVICC PIVICC 3235
 pcm048 MACH_PCM048 PCM048 3236
 dds MACH_DDS DDS 3237
 chalten_xa1 MACH_CHALTEN_XA1 CHALTEN_XA1 3238
+ts48xx MACH_TS48XX TS48XX 3239
+tonga2_tfttimer MACH_TONGA2_TFTTIMER TONGA2_TFTTIMER 3240
+whistler MACH_WHISTLER WHISTLER 3241
+asl_phoenix MACH_ASL_PHOENIX ASL_PHOENIX 3242
+at91sam9263otlite MACH_AT91SAM9263OTLITE AT91SAM9263OTLITE 3243
+ddplug MACH_DDPLUG DDPLUG 3244
+d2plug MACH_D2PLUG D2PLUG 3245
+kzm9d MACH_KZM9D KZM9D 3246
+verdi_lte MACH_VERDI_LTE VERDI_LTE 3247
+nanozoom MACH_NANOZOOM NANOZOOM 3248
+dm3730_som_lv MACH_DM3730_SOM_LV DM3730_SOM_LV 3249
+dm3730_torpedo MACH_DM3730_TORPEDO DM3730_TORPEDO 3250
+anchovy MACH_ANCHOVY ANCHOVY 3251
+re2rev20 MACH_RE2REV20 RE2REV20 3253
+re2rev21 MACH_RE2REV21 RE2REV21 3254
+cns21xx MACH_CNS21XX CNS21XX 3255
+rider MACH_RIDER RIDER 3257
+nsk330 MACH_NSK330 NSK330 3258
+cns2133evb MACH_CNS2133EVB CNS2133EVB 3259
+z3_816x_mod MACH_Z3_816X_MOD Z3_816X_MOD 3260
+z3_814x_mod MACH_Z3_814X_MOD Z3_814X_MOD 3261
+beect MACH_BEECT BEECT 3262
+dma_thunderbug MACH_DMA_THUNDERBUG DMA_THUNDERBUG 3263
+omn_at91sam9g20 MACH_OMN_AT91SAM9G20 OMN_AT91SAM9G20 3264
+mx25_e2s_uc MACH_MX25_E2S_UC MX25_E2S_UC 3265
+mione MACH_MIONE MIONE 3266
+top9000_tcu MACH_TOP9000_TCU TOP9000_TCU 3267
+top9000_bsl MACH_TOP9000_BSL TOP9000_BSL 3268
+kingdom MACH_KINGDOM KINGDOM 3269
+armadillo460 MACH_ARMADILLO460 ARMADILLO460 3270
+lq2 MACH_LQ2 LQ2 3271
+sweda_tms2 MACH_SWEDA_TMS2 SWEDA_TMS2 3272
+mx53_loco MACH_MX53_LOCO MX53_LOCO 3273
+acer_a8 MACH_ACER_A8 ACER_A8 3275
+acer_gauguin MACH_ACER_GAUGUIN ACER_GAUGUIN 3276
+guppy MACH_GUPPY GUPPY 3277
+mx61_ard MACH_MX61_ARD MX61_ARD 3278
+tx53 MACH_TX53 TX53 3279
+omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280
+uemd MACH_UEMD UEMD 3281
+ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282
+rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283
+nookcolor MACH_NOOKCOLOR NOOKCOLOR 3284
+hkdkc100 MACH_HKDKC100 HKDKC100 3285
+ts42xx MACH_TS42XX TS42XX 3286
+aebl MACH_AEBL AEBL 3287
+wario MACH_WARIO WARIO 3288
+gfs_spm MACH_GFS_SPM GFS_SPM 3289
+cm_t3730 MACH_CM_T3730 CM_T3730 3290
+isc3 MACH_ISC3 ISC3 3291
+rascal MACH_RASCAL RASCAL 3292
+hrefv60 MACH_HREFV60 HREFV60 3293
+tpt_2_0 MACH_TPT_2_0 TPT_2_0 3294
+pyramid_td MACH_PYRAMID_TD PYRAMID_TD 3295
+splendor MACH_SPLENDOR SPLENDOR 3296
+guf_planet MACH_GUF_PLANET GUF_PLANET 3297
+msm8x60_qt MACH_MSM8X60_QT MSM8X60_QT 3298
+htc_hd_mini MACH_HTC_HD_MINI HTC_HD_MINI 3299
+athene MACH_ATHENE ATHENE 3300
+deep_r_ek_1 MACH_DEEP_R_EK_1 DEEP_R_EK_1 3301
+vivow_ct MACH_VIVOW_CT VIVOW_CT 3302
+nery_1000 MACH_NERY_1000 NERY_1000 3303
+rfl109145_ssrv MACH_RFL109145_SSRV RFL109145_SSRV 3304
+nmh MACH_NMH NMH 3305
+wn802t MACH_WN802T WN802T 3306
+dragonet MACH_DRAGONET DRAGONET 3307
+geneva_b MACH_GENEVA_B GENEVA_B 3308
+at91sam9263desk16l MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L 3309
+bcmhana_sv MACH_BCMHANA_SV BCMHANA_SV 3310
+bcmhana_tablet MACH_BCMHANA_TABLET BCMHANA_TABLET 3311
+koi MACH_KOI KOI 3312
+ts4800 MACH_TS4800 TS4800 3313
+tqma9263 MACH_TQMA9263 TQMA9263 3314
+holiday MACH_HOLIDAY HOLIDAY 3315
+dma_6410 MACH_DMA6410 DMA6410 3316
+pcats_overlay MACH_PCATS_OVERLAY PCATS_OVERLAY 3317
+hwgw6410 MACH_HWGW6410 HWGW6410 3318
+shenzhou MACH_SHENZHOU SHENZHOU 3319
+cwme9210 MACH_CWME9210 CWME9210 3320
+cwme9210js MACH_CWME9210JS CWME9210JS 3321
+pgs_v1 MACH_PGS_SITARA PGS_SITARA 3322
+colibri_tegra2 MACH_COLIBRI_TEGRA2 COLIBRI_TEGRA2 3323
+w21 MACH_W21 W21 3324
+polysat1 MACH_POLYSAT1 POLYSAT1 3325
+dataway MACH_DATAWAY DATAWAY 3326
+cobral138 MACH_COBRAL138 COBRAL138 3327
+roverpcs8 MACH_ROVERPCS8 ROVERPCS8 3328
+marvelc MACH_MARVELC MARVELC 3329
+navefihid MACH_NAVEFIHID NAVEFIHID 3330
+dm365_cv100 MACH_DM365_CV100 DM365_CV100 3331
+able MACH_ABLE ABLE 3332
+legacy MACH_LEGACY LEGACY 3333
+icong MACH_ICONG ICONG 3334
+rover_g8 MACH_ROVER_G8 ROVER_G8 3335
+t5388p MACH_T5388P T5388P 3336
+dingo MACH_DINGO DINGO 3337
+goflexhome MACH_GOFLEXHOME GOFLEXHOME 3338
@@ -40,8 +40,8 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000)
-#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000)
+#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
+#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000)
 #define MAS0_NV(x) ((x) & 0x00000FFF)
 #define MAS0_HES 0x00004000
 #define MAS0_WQ_ALLWAYS 0x00000000

@@ -50,12 +50,12 @@
 
 #define MAS1_VALID 0x80000000
 #define MAS1_IPROT 0x40000000
-#define MAS1_TID(x) ((x << 16) & 0x3FFF0000)
+#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000)
 #define MAS1_IND 0x00002000
 #define MAS1_TS 0x00001000
 #define MAS1_TSIZE_MASK 0x00000f80
 #define MAS1_TSIZE_SHIFT 7
-#define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
 
 #define MAS2_EPN 0xFFFFF000
 #define MAS2_X0 0x00000040

@@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr;
 
 #ifdef CONFIG_FLATMEM
 #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
 #endif
 
 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
@@ -18,7 +18,7 @@
 #include <asm/mmu.h>
 
 _GLOBAL(__setup_cpu_603)
-mflr r4
+mflr r5
 BEGIN_MMU_FTR_SECTION
 li r10,0
 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */

@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
 bl __init_fpu_registers
 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
 bl setup_common_caches
-mtlr r4
+mtlr r5
 blr
 _GLOBAL(__setup_cpu_604)
-mflr r4
+mflr r5
 bl setup_common_caches
 bl setup_604_hid0
-mtlr r4
+mtlr r5
 blr
 _GLOBAL(__setup_cpu_750)
-mflr r4
+mflr r5
 bl __init_fpu_registers
 bl setup_common_caches
 bl setup_750_7400_hid0
-mtlr r4
+mtlr r5
 blr
 _GLOBAL(__setup_cpu_750cx)
-mflr r4
+mflr r5
 bl __init_fpu_registers
 bl setup_common_caches
 bl setup_750_7400_hid0
 bl setup_750cx
-mtlr r4
+mtlr r5
 blr
 _GLOBAL(__setup_cpu_750fx)
-mflr r4
+mflr r5
 bl __init_fpu_registers
 bl setup_common_caches
 bl setup_750_7400_hid0
 bl setup_750fx
-mtlr r4
+mtlr r5
 blr
 _GLOBAL(__setup_cpu_7400)
-mflr r4
+mflr r5
 bl __init_fpu_registers
 bl setup_7400_workarounds
 bl setup_common_caches
 bl setup_750_7400_hid0
-mtlr r4
+mtlr r5
 blr
 _GLOBAL(__setup_cpu_7410)
-mflr r4
+mflr r5
 bl __init_fpu_registers
 bl setup_7410_workarounds
 bl setup_common_caches
 bl setup_750_7400_hid0
 li r3,0
 mtspr SPRN_L2CR2,r3
-mtlr r4
+mtlr r5
 blr
 _GLOBAL(__setup_cpu_745x)
-mflr r4
+mflr r5
 bl setup_common_caches
 bl setup_745x_specifics
-mtlr r4
+mtlr r5
 blr
 
 /* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@ setup_750cx:
 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
 cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
 bnelr
-lwz r6,CPU_SPEC_FEATURES(r5)
+lwz r6,CPU_SPEC_FEATURES(r4)
 li r7,CPU_FTR_CAN_NAP
 andc r6,r6,r7
-stw r6,CPU_SPEC_FEATURES(r5)
+stw r6,CPU_SPEC_FEATURES(r4)
 blr
 
 /* 750fx specific

@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
 andis. r11,r11,L3CR_L3E@h
 beq 1f
 END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
-lwz r6,CPU_SPEC_FEATURES(r5)
+lwz r6,CPU_SPEC_FEATURES(r4)
 andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
 beq 1f
 li r7,CPU_FTR_CAN_NAP
 andc r6,r6,r7
-stw r6,CPU_SPEC_FEATURES(r5)
+stw r6,CPU_SPEC_FEATURES(r4)
 1:
 mfspr r11,SPRN_HID0
 
@@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
 * pointer on ppc64 and booke as we are running at 0 in real mode
 * on ppc64 and reloc_offset is always 0 on booke.
 */
-if (s->cpu_setup) {
-s->cpu_setup(offset, s);
+if (t->cpu_setup) {
+t->cpu_setup(offset, t);
 }
 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */
 }
@@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
 dbg("removing cpu %lu from node %d\n", cpu, node);
 
 if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
-cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
+cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 } else {
 printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
 cpu, node);
@@ -1289,10 +1289,9 @@ u64 memory_hotplug_max(void)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/* Vrtual Processor Home Node (VPHN) support */
+/* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
-#define VPHN_NR_CHANGE_CTRS (8)
-static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
+static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
 static cpumask_t cpu_associativity_changes_mask;
 static int vphn_enabled;
 static void set_topology_timer(void);

@@ -1303,16 +1302,18 @@ static void set_topology_timer(void);
 */
 static void setup_cpu_associativity_change_counters(void)
 {
-int cpu = 0;
+int cpu;
 
+/* The VPHN feature supports a maximum of 8 reference points */
+BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
+
 for_each_possible_cpu(cpu) {
-int i = 0;
+int i;
 u8 *counts = vphn_cpu_change_counts[cpu];
 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
 
-for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+for (i = 0; i < distance_ref_points_depth; i++)
 counts[i] = hypervisor_counts[i];
-}
 }
 }
 
@@ -1329,7 +1330,7 @@ static void setup_cpu_associativity_change_counters(void)
 */
 static int update_cpu_associativity_changes_mask(void)
 {
-int cpu = 0, nr_cpus = 0;
+int cpu, nr_cpus = 0;
 cpumask_t *changes = &cpu_associativity_changes_mask;
 
 cpumask_clear(changes);

@@ -1339,8 +1340,8 @@ static int update_cpu_associativity_changes_mask(void)
 u8 *counts = vphn_cpu_change_counts[cpu];
 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
 
-for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
-if (hypervisor_counts[i] > counts[i]) {
+for (i = 0; i < distance_ref_points_depth; i++) {
+if (hypervisor_counts[i] != counts[i]) {
 counts[i] = hypervisor_counts[i];
 changed = 1;
 }

@@ -1354,8 +1355,11 @@ static int update_cpu_associativity_changes_mask(void)
 return nr_cpus;
 }
 
-/* 6 64-bit registers unpacked into 12 32-bit associativity values */
-#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
+/*
+ * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
+ * the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
 
 /*
 * Convert the associativity domain numbers returned from the hypervisor
@@ -1363,15 +1367,14 @@ static int update_cpu_associativity_changes_mask(void)
 */
 static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 {
-int i = 0;
-int nr_assoc_doms = 0;
+int i, nr_assoc_doms = 0;
 const u16 *field = (const u16*) packed;
 
 #define VPHN_FIELD_UNUSED (0xffff)
 #define VPHN_FIELD_MSB (0x8000)
 #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
 
-for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
+for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
 if (*field == VPHN_FIELD_UNUSED) {
 /* All significant fields processed, and remaining
 * fields contain the reserved value of all 1's.

@@ -1379,14 +1382,12 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 */
 unpacked[i] = *((u32*)field);
 field += 2;
-}
-else if (*field & VPHN_FIELD_MSB) {
+} else if (*field & VPHN_FIELD_MSB) {
 /* Data is in the lower 15 bits of this field */
 unpacked[i] = *field & VPHN_FIELD_MASK;
 field++;
 nr_assoc_doms++;
-}
-else {
+} else {
 /* Data is in the lower 15 bits of this field
 * concatenated with the next 16 bit field
 */

@@ -1396,6 +1397,9 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 }
 }
 
+/* The first cell contains the length of the property */
+unpacked[0] = nr_assoc_doms;
+
 return nr_assoc_doms;
 }
 
@@ -1405,7 +1409,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 */
 static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 {
-long rc = 0;
+long rc;
 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
 u64 flags = 1;
 int hwcpu = get_hard_smp_processor_id(cpu);

@@ -1419,7 +1423,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 static long vphn_get_associativity(unsigned long cpu,
 unsigned int *associativity)
 {
-long rc = 0;
+long rc;
 
 rc = hcall_vphn(cpu, associativity);
 
@@ -1445,9 +1449,9 @@ static long vphn_get_associativity(unsigned long cpu,
 */
 int arch_update_cpu_topology(void)
 {
-int cpu = 0, nid = 0, old_nid = 0;
+int cpu, nid, old_nid;
 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
-struct sys_device *sysdev = NULL;
+struct sys_device *sysdev;
 
 for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
 vphn_get_associativity(cpu, associativity);

@@ -1512,7 +1516,8 @@ int start_topology_update(void)
 {
 int rc = 0;
 
-if (firmware_has_feature(FW_FEATURE_VPHN)) {
+if (firmware_has_feature(FW_FEATURE_VPHN) &&
+get_lppaca()->shared_proc) {
 vphn_enabled = 1;
 setup_cpu_associativity_change_counters();
 init_timer_deferrable(&topology_timer);
@@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page);
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
+/*
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this are spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
 void hcall_tracepoint_regfunc(void)
 {
 hcall_tracepoint_refcount++;

@@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void)
 
 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 {
+unsigned long flags;
+unsigned int *depth;
+
+local_irq_save(flags);
+
+depth = &__get_cpu_var(hcall_trace_depth);
+
+if (*depth)
+goto out;
+
+(*depth)++;
 trace_hcall_entry(opcode, args);
+(*depth)--;
+
+out:
+local_irq_restore(flags);
 }
 
 void __trace_hcall_exit(long opcode, unsigned long retval,
 unsigned long *retbuf)
 {
+unsigned long flags;
+unsigned int *depth;
+
+local_irq_save(flags);
+
+depth = &__get_cpu_var(hcall_trace_depth);
+
+if (*depth)
+goto out;
+
+(*depth)++;
 trace_hcall_exit(opcode, retval, retbuf);
+(*depth)--;
+
+out:
+local_irq_restore(flags);
 }
 #endif
@@ -12,10 +12,8 @@
 #include <linux/cpumask.h>
 #include <asm/segment.h>
 #include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
 #include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"

@@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void)
 memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
+int __init acpi_configure_wakeup_memory(void)
+{
+if (acpi_realmode)
+set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
 
 static int __init acpi_sleep_setup(char *str)
 {
@@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 char *value = NULL;
 struct posix_acl *acl;
 
+if (!IS_POSIXACL(inode))
+return NULL;
+
 acl = get_cached_acl(inode, type);
 if (acl != ACL_NOT_CACHED)
 return acl;

@@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name,
 struct posix_acl *acl;
 int ret = 0;
 
+if (!IS_POSIXACL(dentry->d_inode))
+return -EOPNOTSUPP;
+
 acl = btrfs_get_acl(dentry->d_inode, type);
 
 if (IS_ERR(acl))
@@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 u64 em_len;
 u64 em_start;
 struct extent_map *em;
-int ret;
+int ret = -ENOMEM;
 u32 *sums;
 
 tree = &BTRFS_I(inode)->io_tree;

@@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 compressed_len = em->block_len;
 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+if (!cb)
+goto out;
+
 atomic_set(&cb->pending_bios, 0);
 cb->errors = 0;
 cb->inode = inode;

@@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
 PAGE_CACHE_SIZE;
-cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
+cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
 GFP_NOFS);
+if (!cb->compressed_pages)
+goto fail1;
+
 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
 for (page_index = 0; page_index < nr_pages; page_index++) {
 cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
 __GFP_HIGHMEM);
+if (!cb->compressed_pages[page_index])
+goto fail2;
 }
 cb->nr_pages = nr_pages;
 

@@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 cb->len = uncompressed_len;
 
 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+if (!comp_bio)
+goto fail2;
 comp_bio->bi_private = cb;
 comp_bio->bi_end_io = end_compressed_bio_read;
 atomic_inc(&cb->pending_bios);
@@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 bio_put(comp_bio);
 return 0;
+
+fail2:
+for (page_index = 0; page_index < nr_pages; page_index++)
+free_page((unsigned long)cb->compressed_pages[page_index]);
+
+kfree(cb->compressed_pages);
+fail1:
+kfree(cb);
+out:
+free_extent_map(em);
+return ret;
 }
 
 static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];

@@ -900,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
 return ret;
 }
 
-void __exit btrfs_exit_compress(void)
+void btrfs_exit_compress(void)
 {
 free_workspaces();
 }
@@ -1550,6 +1550,7 @@ static int transaction_kthread(void *arg)
 spin_unlock(&root->fs_info->new_trans_lock);
 
 trans = btrfs_join_transaction(root, 1);
+BUG_ON(IS_ERR(trans));
 if (transid == trans->transid) {
 ret = btrfs_commit_transaction(trans, root);
 BUG_ON(ret);

@@ -2453,10 +2454,14 @@ int btrfs_commit_super(struct btrfs_root *root)
 up_write(&root->fs_info->cleanup_work_sem);
 
 trans = btrfs_join_transaction(root, 1);
+if (IS_ERR(trans))
+return PTR_ERR(trans);
 ret = btrfs_commit_transaction(trans, root);
 BUG_ON(ret);
 /* run commit again to drop the original snapshot */
 trans = btrfs_join_transaction(root, 1);
+if (IS_ERR(trans))
+return PTR_ERR(trans);
 btrfs_commit_transaction(trans, root);
 ret = btrfs_write_and_wait_transaction(NULL, root);
 BUG_ON(ret);
@@ -2554,6 +2559,8 @@ int close_ctree(struct btrfs_root *root)
 kfree(fs_info->chunk_root);
 kfree(fs_info->dev_root);
 kfree(fs_info->csum_root);
+kfree(fs_info);
+
 return 0;
 }
 
@@ -171,6 +171,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
 int ret;
 
 path = btrfs_alloc_path();
+if (!path)
+return ERR_PTR(-ENOMEM);
 
 if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
 key.objectid = root->root_key.objectid;
@@ -320,11 +320,6 @@ static int caching_kthread(void *data)
 if (!path)
 return -ENOMEM;
 
-exclude_super_stripes(extent_root, block_group);
-spin_lock(&block_group->space_info->lock);
-block_group->space_info->bytes_readonly += block_group->bytes_super;
-spin_unlock(&block_group->space_info->lock);
-
 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
 /*

@@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 cache->cached = BTRFS_CACHE_NO;
 }
 spin_unlock(&cache->lock);
-if (ret == 1)
+if (ret == 1) {
+free_excluded_extents(fs_info->extent_root, cache);
 return 0;
+}
 }
 
 if (load_cache_only)
@ -3344,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
|
||||||
u64 reserved;
|
u64 reserved;
|
||||||
u64 max_reclaim;
|
u64 max_reclaim;
|
||||||
u64 reclaimed = 0;
|
u64 reclaimed = 0;
|
||||||
|
long time_left;
|
||||||
int pause = 1;
|
int pause = 1;
|
||||||
int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
|
int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
|
||||||
|
int loops = 0;
|
||||||
|
|
||||||
block_rsv = &root->fs_info->delalloc_block_rsv;
|
block_rsv = &root->fs_info->delalloc_block_rsv;
|
||||||
space_info = block_rsv->space_info;
|
space_info = block_rsv->space_info;
|
||||||
|
@ -3358,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
|
||||||
|
|
||||||
max_reclaim = min(reserved, to_reclaim);
|
max_reclaim = min(reserved, to_reclaim);
|
||||||
|
|
||||||
while (1) {
|
while (loops < 1024) {
|
||||||
/* have the flusher threads jump in and do some IO */
|
/* have the flusher threads jump in and do some IO */
|
||||||
smp_mb();
|
smp_mb();
|
||||||
nr_pages = min_t(unsigned long, nr_pages,
|
nr_pages = min_t(unsigned long, nr_pages,
|
||||||
|
@ -3366,8 +3365,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
|
||||||
writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
|
writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
|
||||||
|
|
||||||
spin_lock(&space_info->lock);
|
spin_lock(&space_info->lock);
|
||||||
if (reserved > space_info->bytes_reserved)
|
if (reserved > space_info->bytes_reserved) {
|
||||||
|
loops = 0;
|
||||||
reclaimed += reserved - space_info->bytes_reserved;
|
reclaimed += reserved - space_info->bytes_reserved;
|
||||||
|
} else {
|
||||||
|
loops++;
|
||||||
|
}
|
||||||
reserved = space_info->bytes_reserved;
|
reserved = space_info->bytes_reserved;
|
||||||
spin_unlock(&space_info->lock);
|
spin_unlock(&space_info->lock);
|
||||||
|
|
||||||
|
@ -3378,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
|
|
||||||
__set_current_state(TASK_INTERRUPTIBLE);
|
__set_current_state(TASK_INTERRUPTIBLE);
|
||||||
schedule_timeout(pause);
|
time_left = schedule_timeout(pause);
|
||||||
|
|
||||||
|
/* We were interrupted, exit */
|
||||||
|
if (time_left)
|
||||||
|
break;
|
||||||
|
|
||||||
pause <<= 1;
|
pause <<= 1;
|
||||||
if (pause > HZ / 10)
|
if (pause > HZ / 10)
|
||||||
pause = HZ / 10;
|
pause = HZ / 10;
|
||||||
|
@ -3588,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
|
||||||
|
|
||||||
if (num_bytes > 0) {
|
if (num_bytes > 0) {
|
||||||
if (dest) {
|
if (dest) {
|
||||||
block_rsv_add_bytes(dest, num_bytes, 0);
|
spin_lock(&dest->lock);
|
||||||
} else {
|
if (!dest->full) {
|
||||||
|
u64 bytes_to_add;
|
||||||
|
|
||||||
|
bytes_to_add = dest->size - dest->reserved;
|
||||||
|
bytes_to_add = min(num_bytes, bytes_to_add);
|
||||||
|
dest->reserved += bytes_to_add;
|
||||||
|
if (dest->reserved >= dest->size)
|
||||||
|
dest->full = 1;
|
||||||
|
num_bytes -= bytes_to_add;
|
||||||
|
}
|
||||||
|
spin_unlock(&dest->lock);
|
||||||
|
}
|
||||||
|
if (num_bytes) {
|
||||||
spin_lock(&space_info->lock);
|
spin_lock(&space_info->lock);
|
||||||
space_info->bytes_reserved -= num_bytes;
|
space_info->bytes_reserved -= num_bytes;
|
||||||
spin_unlock(&space_info->lock);
|
spin_unlock(&space_info->lock);
|
||||||
|
@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
|
||||||
|
|
||||||
num_bytes = ALIGN(num_bytes, root->sectorsize);
|
num_bytes = ALIGN(num_bytes, root->sectorsize);
|
||||||
atomic_dec(&BTRFS_I(inode)->outstanding_extents);
|
atomic_dec(&BTRFS_I(inode)->outstanding_extents);
|
||||||
|
WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
|
||||||
|
|
||||||
spin_lock(&BTRFS_I(inode)->accounting_lock);
|
spin_lock(&BTRFS_I(inode)->accounting_lock);
|
||||||
nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
|
nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
|
||||||
|
@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
|
||||||
struct btrfs_root *root, u32 blocksize)
|
struct btrfs_root *root, u32 blocksize)
|
||||||
{
|
{
|
||||||
struct btrfs_block_rsv *block_rsv;
|
struct btrfs_block_rsv *block_rsv;
|
||||||
|
struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
block_rsv = get_block_rsv(trans, root);
|
block_rsv = get_block_rsv(trans, root);
|
||||||
|
@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans,
|
||||||
if (block_rsv->size == 0) {
|
if (block_rsv->size == 0) {
|
||||||
ret = reserve_metadata_bytes(trans, root, block_rsv,
|
ret = reserve_metadata_bytes(trans, root, block_rsv,
|
||||||
blocksize, 0);
|
blocksize, 0);
|
||||||
if (ret)
|
/*
|
||||||
|
* If we couldn't reserve metadata bytes try and use some from
|
||||||
|
* the global reserve.
|
||||||
|
*/
|
||||||
|
if (ret && block_rsv != global_rsv) {
|
||||||
|
ret = block_rsv_use_bytes(global_rsv, blocksize);
|
||||||
|
if (!ret)
|
||||||
|
return global_rsv;
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
|
} else if (ret) {
|
||||||
|
return ERR_PTR(ret);
|
||||||
|
}
|
||||||
return block_rsv;
|
return block_rsv;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = block_rsv_use_bytes(block_rsv, blocksize);
|
ret = block_rsv_use_bytes(block_rsv, blocksize);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
return block_rsv;
|
return block_rsv;
|
||||||
|
if (ret) {
|
||||||
|
WARN_ON(1);
|
||||||
|
ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
|
||||||
|
0);
|
||||||
|
if (!ret) {
|
||||||
|
spin_lock(&block_rsv->lock);
|
||||||
|
block_rsv->size += blocksize;
|
||||||
|
spin_unlock(&block_rsv->lock);
|
||||||
|
return block_rsv;
|
||||||
|
} else if (ret && block_rsv != global_rsv) {
|
||||||
|
ret = block_rsv_use_bytes(global_rsv, blocksize);
|
||||||
|
if (!ret)
|
||||||
|
return global_rsv;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return ERR_PTR(-ENOSPC);
|
return ERR_PTR(-ENOSPC);
|
||||||
}
|
}
|
||||||
|
@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
|
||||||
BUG_ON(!wc);
|
BUG_ON(!wc);
|
||||||
|
|
||||||
trans = btrfs_start_transaction(tree_root, 0);
|
trans = btrfs_start_transaction(tree_root, 0);
|
||||||
|
BUG_ON(IS_ERR(trans));
|
||||||
|
|
||||||
if (block_rsv)
|
if (block_rsv)
|
||||||
trans->block_rsv = block_rsv;
|
trans->block_rsv = block_rsv;
|
||||||
|
|
||||||
|
@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
|
||||||
|
|
||||||
btrfs_end_transaction_throttle(trans, tree_root);
|
btrfs_end_transaction_throttle(trans, tree_root);
|
||||||
trans = btrfs_start_transaction(tree_root, 0);
|
trans = btrfs_start_transaction(tree_root, 0);
|
||||||
|
BUG_ON(IS_ERR(trans));
|
||||||
if (block_rsv)
|
if (block_rsv)
|
||||||
trans->block_rsv = block_rsv;
|
trans->block_rsv = block_rsv;
|
||||||
}
|
}
|
||||||
|
@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start,
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
ra = kzalloc(sizeof(*ra), GFP_NOFS);
|
ra = kzalloc(sizeof(*ra), GFP_NOFS);
|
||||||
|
if (!ra)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
mutex_lock(&inode->i_mutex);
|
mutex_lock(&inode->i_mutex);
|
||||||
first_index = start >> PAGE_CACHE_SHIFT;
|
first_index = start >> PAGE_CACHE_SHIFT;
|
||||||
|
@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
|
||||||
BUG_ON(reloc_root->commit_root != NULL);
|
BUG_ON(reloc_root->commit_root != NULL);
|
||||||
while (1) {
|
while (1) {
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
BUG_ON(!trans);
|
BUG_ON(IS_ERR(trans));
|
||||||
|
|
||||||
mutex_lock(&root->fs_info->drop_mutex);
|
mutex_lock(&root->fs_info->drop_mutex);
|
||||||
ret = btrfs_drop_snapshot(trans, reloc_root);
|
ret = btrfs_drop_snapshot(trans, reloc_root);
|
||||||
|
@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
|
||||||
|
|
||||||
if (found) {
|
if (found) {
|
||||||
trans = btrfs_start_transaction(root, 1);
|
trans = btrfs_start_transaction(root, 1);
|
||||||
BUG_ON(!trans);
|
BUG_ON(IS_ERR(trans));
|
||||||
ret = btrfs_commit_transaction(trans, root);
|
ret = btrfs_commit_transaction(trans, root);
|
||||||
BUG_ON(ret);
|
BUG_ON(ret);
|
||||||
}
|
}
|
||||||
|
@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
|
||||||
|
|
||||||
|
|
||||||
trans = btrfs_start_transaction(extent_root, 1);
|
trans = btrfs_start_transaction(extent_root, 1);
|
||||||
BUG_ON(!trans);
|
BUG_ON(IS_ERR(trans));
|
||||||
|
|
||||||
if (extent_key->objectid == 0) {
|
if (extent_key->objectid == 0) {
|
||||||
ret = del_extent_zero(trans, extent_root, path, extent_key);
|
ret = del_extent_zero(trans, extent_root, path, extent_key);
|
||||||
|
@ -8270,6 +8322,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
|
||||||
if (block_group->cached == BTRFS_CACHE_STARTED)
|
if (block_group->cached == BTRFS_CACHE_STARTED)
|
||||||
wait_block_group_cache_done(block_group);
|
wait_block_group_cache_done(block_group);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We haven't cached this block group, which means we could
|
||||||
|
* possibly have excluded extents on this block group.
|
||||||
|
*/
|
||||||
|
if (block_group->cached == BTRFS_CACHE_NO)
|
||||||
|
free_excluded_extents(info->extent_root, block_group);
|
||||||
|
|
||||||
btrfs_remove_free_space_cache(block_group);
|
btrfs_remove_free_space_cache(block_group);
|
||||||
btrfs_put_block_group(block_group);
|
btrfs_put_block_group(block_group);
|
||||||
|
|
||||||
|
@ -8384,6 +8443,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
|
||||||
cache->flags = btrfs_block_group_flags(&cache->item);
|
cache->flags = btrfs_block_group_flags(&cache->item);
|
||||||
cache->sectorsize = root->sectorsize;
|
cache->sectorsize = root->sectorsize;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We need to exclude the super stripes now so that the space
|
||||||
|
* info has super bytes accounted for, otherwise we'll think
|
||||||
|
* we have more space than we actually do.
|
||||||
|
*/
|
||||||
|
exclude_super_stripes(root, cache);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* check for two cases, either we are full, and therefore
|
* check for two cases, either we are full, and therefore
|
||||||
* don't need to bother with the caching work since we won't
|
* don't need to bother with the caching work since we won't
|
||||||
|
@ -8392,12 +8458,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
|
||||||
* time, particularly in the full case.
|
* time, particularly in the full case.
|
||||||
*/
|
*/
|
||||||
if (found_key.offset == btrfs_block_group_used(&cache->item)) {
|
if (found_key.offset == btrfs_block_group_used(&cache->item)) {
|
||||||
exclude_super_stripes(root, cache);
|
|
||||||
cache->last_byte_to_unpin = (u64)-1;
|
cache->last_byte_to_unpin = (u64)-1;
|
||||||
cache->cached = BTRFS_CACHE_FINISHED;
|
cache->cached = BTRFS_CACHE_FINISHED;
|
||||||
free_excluded_extents(root, cache);
|
free_excluded_extents(root, cache);
|
||||||
} else if (btrfs_block_group_used(&cache->item) == 0) {
|
} else if (btrfs_block_group_used(&cache->item) == 0) {
|
||||||
exclude_super_stripes(root, cache);
|
|
||||||
cache->last_byte_to_unpin = (u64)-1;
|
cache->last_byte_to_unpin = (u64)-1;
|
||||||
cache->cached = BTRFS_CACHE_FINISHED;
|
cache->cached = BTRFS_CACHE_FINISHED;
|
||||||
add_new_free_space(cache, root->fs_info,
|
add_new_free_space(cache, root->fs_info,
|
||||||
|
|
|
@@ -1865,7 +1865,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
 	bio_get(bio);
 
 	if (tree->ops && tree->ops->submit_bio_hook)
-		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
 					   mirror_num, bio_flags, start);
 	else
 		submit_bio(rw, bio);
@@ -1920,6 +1920,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 		nr = bio_get_nr_vecs(bdev);
 
 	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+	if (!bio)
+		return -ENOMEM;
 
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
@@ -2126,7 +2128,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
 				      &bio_flags);
 	if (bio)
-		submit_one_bio(READ, bio, 0, bio_flags);
+		ret = submit_one_bio(READ, bio, 0, bio_flags);
 	return ret;
 }
 
@@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 	root = root->fs_info->csum_root;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
 
 	while (1) {
 		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -548,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 			if (path->slots[0] == 0)
 				goto out;
 			path->slots[0]--;
+		} else if (ret < 0) {
+			goto out;
 		}
+
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 
@@ -793,8 +793,12 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
 	for (i = 0; i < num_pages; i++) {
 		pages[i] = grab_cache_page(inode->i_mapping, index + i);
 		if (!pages[i]) {
-			err = -ENOMEM;
-			BUG_ON(1);
+			int c;
+			for (c = i - 1; c >= 0; c--) {
+				unlock_page(pages[c]);
+				page_cache_release(pages[c]);
+			}
+			return -ENOMEM;
 		}
 		wait_on_page_writeback(pages[i]);
 	}
@@ -946,6 +950,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
 		     (sizeof(struct page *)));
 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	/* generic_write_checks can change our pos */
 	start_pos = pos;
@@ -984,8 +992,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 		size_t write_bytes = min(iov_iter_count(&i),
 					 nrptrs * (size_t)PAGE_CACHE_SIZE -
 					 offset);
-		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
-					PAGE_CACHE_SHIFT;
+		size_t num_pages = (write_bytes + offset +
+				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
 		WARN_ON(num_pages > nrptrs);
 		memset(pages, 0, sizeof(struct page *) * nrptrs);
@@ -1015,8 +1023,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 
 		copied = btrfs_copy_from_user(pos, num_pages,
 					   write_bytes, pages, &i);
-		dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
+		dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
 					PAGE_CACHE_SHIFT;
 
 		if (num_pages > dirty_pages) {
 			if (copied > 0)
@@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 	return entry;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+		    struct btrfs_free_space *info)
 {
 	rb_erase(&info->offset_index, &block_group->free_space_offset);
 	block_group->free_extents--;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info)
+{
+	__unlink_free_space(block_group, info);
 	block_group->free_space -= info->bytes;
 }
 
@@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
+	u64 size = block_group->key.offset;
 
 	/*
 	 * The goal is to keep the total amount of memory used per 1gb of space
 	 * at or below 32k, so we need to adjust how much memory we allow to be
 	 * used by extent based free space tracking
 	 */
-	max_bytes = MAX_CACHE_BYTES_PER_GIG *
-		(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+	if (size < 1024 * 1024 * 1024)
+		max_bytes = MAX_CACHE_BYTES_PER_GIG;
+	else
+		max_bytes = MAX_CACHE_BYTES_PER_GIG *
+			div64_u64(size, 1024 * 1024 * 1024);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
@@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
 	recalculate_thresholds(block_group);
 }
 
+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_space *bitmap_info)
+{
+	unlink_free_space(block_group, bitmap_info);
+	kfree(bitmap_info->bitmap);
+	kfree(bitmap_info);
+	block_group->total_bitmaps--;
+	recalculate_thresholds(block_group);
+}
+
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
 			      struct btrfs_free_space *bitmap_info,
 			      u64 *offset, u64 *bytes)
@@ -1195,6 +1216,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
 	 */
 	search_start = *offset;
 	search_bytes = *bytes;
+	search_bytes = min(search_bytes, end - search_start + 1);
 	ret = search_bitmap(block_group, bitmap_info, &search_start,
 			    &search_bytes);
 	BUG_ON(ret < 0 || search_start != *offset);
@@ -1211,13 +1233,8 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
 
 	if (*bytes) {
 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
-		if (!bitmap_info->bytes) {
-			unlink_free_space(block_group, bitmap_info);
-			kfree(bitmap_info->bitmap);
-			kfree(bitmap_info);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!bitmap_info->bytes)
+			free_bitmap(block_group, bitmap_info);
 
 		/*
 		 * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1267,8 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
 			return -EAGAIN;
 
 		goto again;
-	} else if (!bitmap_info->bytes) {
-		unlink_free_space(block_group, bitmap_info);
-		kfree(bitmap_info->bitmap);
-		kfree(bitmap_info);
-		block_group->total_bitmaps--;
-		recalculate_thresholds(block_group);
-	}
+	} else if (!bitmap_info->bytes)
+		free_bitmap(block_group, bitmap_info);
 
 	return 0;
 }
@@ -1359,22 +1371,14 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+			  struct btrfs_free_space *info, bool update_stat)
 {
-	struct btrfs_free_space *right_info = NULL;
-	struct btrfs_free_space *left_info = NULL;
-	struct btrfs_free_space *info = NULL;
-	int ret = 0;
-
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!info)
-		return -ENOMEM;
-
-	info->offset = offset;
-	info->bytes = bytes;
-
-	spin_lock(&block_group->tree_lock);
+	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *right_info;
+	bool merged = false;
+	u64 offset = info->offset;
+	u64 bytes = info->bytes;
 
 	/*
 	 * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1392,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	else
 		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
 
-	/*
-	 * If there was no extent directly to the left or right of this new
-	 * extent then we know we're going to have to allocate a new extent, so
-	 * before we do that see if we need to drop this into a bitmap
-	 */
-	if ((!left_info || left_info->bitmap) &&
-	    (!right_info || right_info->bitmap)) {
-		ret = insert_into_bitmap(block_group, info);
-
-		if (ret < 0) {
-			goto out;
-		} else if (ret) {
-			ret = 0;
-			goto out;
-		}
-	}
-
 	if (right_info && !right_info->bitmap) {
-		unlink_free_space(block_group, right_info);
+		if (update_stat)
+			unlink_free_space(block_group, right_info);
+		else
+			__unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
 		kfree(right_info);
+		merged = true;
 	}
 
 	if (left_info && !left_info->bitmap &&
 	    left_info->offset + left_info->bytes == offset) {
-		unlink_free_space(block_group, left_info);
+		if (update_stat)
+			unlink_free_space(block_group, left_info);
+		else
+			__unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
 		kfree(left_info);
+		merged = true;
 	}
 
+	return merged;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	struct btrfs_free_space *info;
+	int ret = 0;
+
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
+	if (try_merge_free_space(block_group, info, true))
+		goto link;
+
+	/*
+	 * There was no extent directly to the left or right of this new
+	 * extent then we know we're going to have to allocate a new extent, so
+	 * before we do that see if we need to drop this into a bitmap
+	 */
+	ret = insert_into_bitmap(block_group, info);
+	if (ret < 0) {
+		goto out;
+	} else if (ret) {
+		ret = 0;
+		goto out;
+	}
+link:
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
@@ -1621,6 +1650,7 @@ __btrfs_return_cluster_to_free_space(
 		node = rb_next(&entry->offset_index);
 		rb_erase(&entry->offset_index, &cluster->root);
 		BUG_ON(entry->bitmap);
+		try_merge_free_space(block_group, entry, false);
 		tree_insert_offset(&block_group->free_space_offset,
 				   entry->offset, &entry->offset_index, 0);
 	}
@@ -1685,13 +1715,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	ret = offset;
 	if (entry->bitmap) {
 		bitmap_clear_bits(block_group, entry, offset, bytes);
-		if (!entry->bytes) {
-			unlink_free_space(block_group, entry);
-			kfree(entry->bitmap);
-			kfree(entry);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!entry->bytes)
+			free_bitmap(block_group, entry);
 	} else {
 		unlink_free_space(block_group, entry);
 		entry->offset += bytes;
@@ -1789,6 +1814,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 
 	ret = search_start;
 	bitmap_clear_bits(block_group, entry, ret, bytes);
+	if (entry->bytes == 0)
+		free_bitmap(block_group, entry);
 out:
 	spin_unlock(&cluster->lock);
 	spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1869,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 			entry->offset += bytes;
 			entry->bytes -= bytes;
 
-			if (entry->bytes == 0) {
+			if (entry->bytes == 0)
 				rb_erase(&entry->offset_index, &cluster->root);
-				kfree(entry);
-			}
 			break;
 		}
 	}
 out:
 	spin_unlock(&cluster->lock);
 
+	if (!ret)
+		return 0;
+
+	spin_lock(&block_group->tree_lock);
+
+	block_group->free_space -= bytes;
+	if (entry->bytes == 0) {
+		block_group->free_extents--;
+		kfree(entry);
+	}
+
+	spin_unlock(&block_group->tree_lock);
+
 	return ret;
 }
 
|
||||||
}
|
}
|
||||||
if (start == 0) {
|
if (start == 0) {
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
BUG_ON(!trans);
|
BUG_ON(IS_ERR(trans));
|
||||||
btrfs_set_trans_block_group(trans, inode);
|
btrfs_set_trans_block_group(trans, inode);
|
||||||
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
||||||
|
|
||||||
|
@ -612,6 +612,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
|
||||||
GFP_NOFS);
|
GFP_NOFS);
|
||||||
|
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
|
BUG_ON(IS_ERR(trans));
|
||||||
ret = btrfs_reserve_extent(trans, root,
|
ret = btrfs_reserve_extent(trans, root,
|
||||||
async_extent->compressed_size,
|
async_extent->compressed_size,
|
||||||
async_extent->compressed_size,
|
async_extent->compressed_size,
|
||||||
|
@ -771,7 +772,7 @@ static noinline int cow_file_range(struct inode *inode,
|
||||||
|
|
||||||
BUG_ON(root == root->fs_info->tree_root);
|
BUG_ON(root == root->fs_info->tree_root);
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
BUG_ON(!trans);
|
BUG_ON(IS_ERR(trans));
|
||||||
btrfs_set_trans_block_group(trans, inode);
|
btrfs_set_trans_block_group(trans, inode);
|
||||||
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
||||||
|
|
||||||
|
@ -1049,7 +1050,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
|
||||||
} else {
|
} else {
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
}
|
}
|
||||||
BUG_ON(!trans);
|
BUG_ON(IS_ERR(trans));
|
||||||
|
|
||||||
cow_start = (u64)-1;
|
cow_start = (u64)-1;
|
||||||
cur_offset = start;
|
cur_offset = start;
|
||||||
|
@ -1557,6 +1558,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
|
||||||
out_page:
|
out_page:
|
||||||
unlock_page(page);
|
unlock_page(page);
|
||||||
page_cache_release(page);
|
page_cache_release(page);
|
||||||
|
kfree(fixup);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1703,7 +1705,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
|
||||||
trans = btrfs_join_transaction_nolock(root, 1);
|
trans = btrfs_join_transaction_nolock(root, 1);
|
||||||
else
|
else
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
BUG_ON(!trans);
|
BUG_ON(IS_ERR(trans));
|
||||||
btrfs_set_trans_block_group(trans, inode);
|
btrfs_set_trans_block_group(trans, inode);
|
||||||
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
||||||
ret = btrfs_update_inode(trans, root, inode);
|
ret = btrfs_update_inode(trans, root, inode);
|
||||||
|
@ -1720,6 +1722,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
|
||||||
trans = btrfs_join_transaction_nolock(root, 1);
|
trans = btrfs_join_transaction_nolock(root, 1);
|
||||||
else
|
else
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
|
BUG_ON(IS_ERR(trans));
|
||||||
btrfs_set_trans_block_group(trans, inode);
|
btrfs_set_trans_block_group(trans, inode);
|
||||||
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
||||||
|
|
||||||
|
@ -2354,6 +2357,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
|
||||||
*/
|
*/
|
||||||
if (is_bad_inode(inode)) {
|
if (is_bad_inode(inode)) {
|
||||||
trans = btrfs_start_transaction(root, 0);
|
trans = btrfs_start_transaction(root, 0);
|
||||||
|
BUG_ON(IS_ERR(trans));
|
||||||
btrfs_orphan_del(trans, inode);
|
btrfs_orphan_del(trans, inode);
|
||||||
btrfs_end_transaction(trans, root);
|
btrfs_end_transaction(trans, root);
|
||||||
iput(inode);
|
iput(inode);
|
||||||
|
@ -2381,6 +2385,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
|
||||||
|
|
||||||
if (root->orphan_block_rsv || root->orphan_item_inserted) {
|
if (root->orphan_block_rsv || root->orphan_item_inserted) {
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
|
BUG_ON(IS_ERR(trans));
|
||||||
btrfs_end_transaction(trans, root);
|
btrfs_end_transaction(trans, root);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2641,7 +2646,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
|
||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
if (!path) {
|
if (!path) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto err;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
path->leave_spinning = 1;
|
path->leave_spinning = 1;
|
||||||
|
@ -2714,9 +2719,10 @@ static int check_path_shared(struct btrfs_root *root,
|
||||||
struct extent_buffer *eb;
|
struct extent_buffer *eb;
|
||||||
int level;
|
int level;
|
||||||
u64 refs = 1;
|
u64 refs = 1;
|
||||||
int uninitialized_var(ret);
|
|
||||||
|
|
||||||
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
|
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
|
||||||
|
int ret;
|
||||||
|
|
||||||
if (!path->nodes[level])
|
if (!path->nodes[level])
|
||||||
break;
|
break;
|
||||||
eb = path->nodes[level];
|
eb = path->nodes[level];
|
||||||
|
@ -2727,7 +2733,7 @@ static int check_path_shared(struct btrfs_root *root,
|
||||||
if (refs > 1)
|
if (refs > 1)
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
return ret; /* XXX callers? */
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -4134,7 +4140,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
|
||||||
}
|
}
|
||||||
srcu_read_unlock(&root->fs_info->subvol_srcu, index);
|
srcu_read_unlock(&root->fs_info->subvol_srcu, index);
|
||||||
|
|
||||||
if (root != sub_root) {
|
if (!IS_ERR(inode) && root != sub_root) {
|
||||||
down_read(&root->fs_info->cleanup_work_sem);
|
down_read(&root->fs_info->cleanup_work_sem);
|
||||||
if (!(inode->i_sb->s_flags & MS_RDONLY))
|
if (!(inode->i_sb->s_flags & MS_RDONLY))
|
||||||
btrfs_orphan_cleanup(sub_root);
|
btrfs_orphan_cleanup(sub_root);
|
||||||
|
@ -4347,6 +4353,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
|
||||||
trans = btrfs_join_transaction_nolock(root, 1);
|
trans = btrfs_join_transaction_nolock(root, 1);
|
||||||
else
|
else
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
|
if (IS_ERR(trans))
|
||||||
|
return PTR_ERR(trans);
|
||||||
btrfs_set_trans_block_group(trans, inode);
|
btrfs_set_trans_block_group(trans, inode);
|
||||||
if (nolock)
|
if (nolock)
|
||||||
ret = btrfs_end_transaction_nolock(trans, root);
|
ret = btrfs_end_transaction_nolock(trans, root);
|
||||||
|
@ -4372,6 +4380,7 @@ void btrfs_dirty_inode(struct inode *inode)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
|
BUG_ON(IS_ERR(trans));
|
||||||
btrfs_set_trans_block_group(trans, inode);
|
btrfs_set_trans_block_group(trans, inode);
|
||||||
|
|
||||||
ret = btrfs_update_inode(trans, root, inode);
|
ret = btrfs_update_inode(trans, root, inode);
|
||||||
|
@ -5176,6 +5185,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
|
||||||
em = NULL;
|
em = NULL;
|
||||||
btrfs_release_path(root, path);
|
btrfs_release_path(root, path);
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
|
if (IS_ERR(trans))
|
||||||
|
return ERR_CAST(trans);
|
||||||
goto again;
|
goto again;
|
||||||
}
|
}
|
||||||
map = kmap(page);
|
map = kmap(page);
|
||||||
|
@ -5280,8 +5291,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
|
||||||
btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
|
btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
|
||||||
|
|
||||||
trans = btrfs_join_transaction(root, 0);
|
trans = btrfs_join_transaction(root, 0);
|
||||||
if (!trans)
|
if (IS_ERR(trans))
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_CAST(trans);
|
||||||
|
|
||||||
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
|
||||||
|
|
||||||
|
@ -5505,7 +5516,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
|
||||||
* while we look for nocow cross refs
|
* while we look for nocow cross refs
|
||||||
*/
|
*/
|
||||||
trans = btrfs_join_transaction(root, 0);
|
trans = btrfs_join_transaction(root, 0);
|
||||||
if (!trans)
|
if (IS_ERR(trans))
|
||||||
goto must_cow;
|
goto must_cow;
|
||||||
|
|
||||||
if (can_nocow_odirect(trans, inode, start, len) == 1) {
|
if (can_nocow_odirect(trans, inode, start, len) == 1) {
|
||||||
|
@ -5640,7 +5651,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
|
||||||
BUG_ON(!ordered);
|
BUG_ON(!ordered);
|
||||||
|
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
if (!trans) {
|
if (IS_ERR(trans)) {
|
||||||
err = -ENOMEM;
|
err = -ENOMEM;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
|
@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
|
||||||
|
|
||||||
|
|
||||||
trans = btrfs_join_transaction(root, 1);
|
trans = btrfs_join_transaction(root, 1);
|
||||||
BUG_ON(!trans);
|
BUG_ON(IS_ERR(trans));
|
||||||
|
|
||||||
ret = btrfs_update_inode(trans, root, inode);
|
ret = btrfs_update_inode(trans, root, inode);
|
||||||
BUG_ON(ret);
|
BUG_ON(ret);
|
||||||
|
@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
|
||||||
|
|
||||||
if (new_size > old_size) {
|
if (new_size > old_size) {
|
||||||
trans = btrfs_start_transaction(root, 0);
|
trans = btrfs_start_transaction(root, 0);
|
||||||
|
if (IS_ERR(trans)) {
|
||||||
|
ret = PTR_ERR(trans);
|
||||||
|
goto out_unlock;
|
||||||
|
}
|
||||||
ret = btrfs_grow_device(trans, device, new_size);
|
ret = btrfs_grow_device(trans, device, new_size);
|
||||||
btrfs_commit_transaction(trans, root);
|
btrfs_commit_transaction(trans, root);
|
||||||
} else {
|
} else {
|
||||||
|
@ -1898,7 +1902,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
|
||||||
|
|
||||||
memcpy(&new_key, &key, sizeof(new_key));
|
memcpy(&new_key, &key, sizeof(new_key));
|
||||||
new_key.objectid = inode->i_ino;
|
new_key.objectid = inode->i_ino;
|
||||||
new_key.offset = key.offset + destoff - off;
|
if (off <= key.offset)
|
||||||
|
new_key.offset = key.offset + destoff - off;
|
||||||
|
else
|
||||||
|
new_key.offset = destoff;
|
||||||
|
|
||||||
trans = btrfs_start_transaction(root, 1);
|
trans = btrfs_start_transaction(root, 1);
|
||||||
if (IS_ERR(trans)) {
|
if (IS_ERR(trans)) {
|
||||||
|
@ -2082,7 +2089,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
|
||||||
|
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
trans = btrfs_start_ioctl_transaction(root, 0);
|
trans = btrfs_start_ioctl_transaction(root, 0);
|
||||||
if (!trans)
|
if (IS_ERR(trans))
|
||||||
goto out_drop;
|
goto out_drop;
|
||||||
|
|
||||||
file->private_data = trans;
|
file->private_data = trans;
|
||||||
|
@ -2138,9 +2145,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
|
||||||
path->leave_spinning = 1;
|
path->leave_spinning = 1;
|
||||||
|
|
||||||
trans = btrfs_start_transaction(root, 1);
|
trans = btrfs_start_transaction(root, 1);
|
||||||
if (!trans) {
|
if (IS_ERR(trans)) {
|
||||||
btrfs_free_path(path);
|
btrfs_free_path(path);
|
||||||
return -ENOMEM;
|
return PTR_ERR(trans);
|
||||||
}
|
}
|
||||||
|
|
||||||
dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
|
dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
|
||||||
|
@ -2334,6 +2341,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
|
||||||
u64 transid;
|
u64 transid;
|
||||||
|
|
||||||
trans = btrfs_start_transaction(root, 0);
|
trans = btrfs_start_transaction(root, 0);
|
||||||
|
if (IS_ERR(trans))
|
||||||
|
return PTR_ERR(trans);
|
||||||
transid = trans->transid;
|
transid = trans->transid;
|
||||||
btrfs_commit_transaction_async(trans, root, 0);
|
btrfs_commit_transaction_async(trans, root, 0);
|
||||||
|
|
||||||
|
|
|
@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
|
||||||
u64 file_offset)
|
u64 file_offset)
|
||||||
{
|
{
|
||||||
struct rb_root *root = &tree->tree;
|
struct rb_root *root = &tree->tree;
|
||||||
struct rb_node *prev;
|
struct rb_node *prev = NULL;
|
||||||
struct rb_node *ret;
|
struct rb_node *ret;
|
||||||
struct btrfs_ordered_extent *entry;
|
struct btrfs_ordered_extent *entry;
|
||||||
|
|
||||||
|
|
|
@@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 #else
 			BUG();
 #endif
+			break;
 		case BTRFS_BLOCK_GROUP_ITEM_KEY:
 			bi = btrfs_item_ptr(l, i,
 					    struct btrfs_block_group_item);
@@ -2028,6 +2028,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 
 	while (1) {
 		trans = btrfs_start_transaction(root, 0);
+		BUG_ON(IS_ERR(trans));
 		trans->block_rsv = rc->block_rsv;
 
 		ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
@@ -2147,6 +2148,12 @@ int prepare_to_merge(struct reloc_control *rc, int err)
 	}
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
+	if (IS_ERR(trans)) {
+		if (!err)
+			btrfs_block_rsv_release(rc->extent_root,
+						rc->block_rsv, num_bytes);
+		return PTR_ERR(trans);
+	}
 
 	if (!err) {
 		if (num_bytes != rc->merging_rsv_size) {
@@ -3222,6 +3229,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
 	trans = btrfs_join_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		btrfs_free_path(path);
+		ret = PTR_ERR(trans);
 		goto out;
 	}
 
@@ -3628,6 +3636,7 @@ int prepare_to_relocate(struct reloc_control *rc)
 	set_reloc_control(rc);
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
+	BUG_ON(IS_ERR(trans));
 	btrfs_commit_transaction(trans, rc->extent_root);
 	return 0;
 }
@@ -3657,6 +3666,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 
 	while (1) {
 		trans = btrfs_start_transaction(rc->extent_root, 0);
+		BUG_ON(IS_ERR(trans));
 
 		if (update_backref_cache(trans, &rc->backref_cache)) {
 			btrfs_end_transaction(trans, rc->extent_root);
@@ -3804,7 +3814,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 
 	/* get rid of pinned extents */
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	btrfs_commit_transaction(trans, rc->extent_root);
+	if (IS_ERR(trans))
+		err = PTR_ERR(trans);
+	else
+		btrfs_commit_transaction(trans, rc->extent_root);
out_free:
 	btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
 	btrfs_free_path(path);
@@ -4022,6 +4035,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
 	int ret;
 
 	trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
+	BUG_ON(IS_ERR(trans));
 
 	memset(&root->root_item.drop_progress, 0,
 	       sizeof(root->root_item.drop_progress));
@@ -4125,6 +4139,11 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 	set_reloc_control(rc);
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
+	if (IS_ERR(trans)) {
+		unset_reloc_control(rc);
+		err = PTR_ERR(trans);
+		goto out_free;
+	}
 
 	rc->merge_reloc_tree = 1;
 
@@ -4154,9 +4173,13 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 	unset_reloc_control(rc);
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	btrfs_commit_transaction(trans, rc->extent_root);
-out:
+	if (IS_ERR(trans))
+		err = PTR_ERR(trans);
+	else
+		btrfs_commit_transaction(trans, rc->extent_root);
+out_free:
 	kfree(rc);
+out:
 	while (!list_empty(&reloc_roots)) {
 		reloc_root = list_entry(reloc_roots.next,
 					struct btrfs_root, root_list);
@@ -383,7 +383,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 					  struct btrfs_fs_devices **fs_devices)
 {
 	substring_t args[MAX_OPT_ARGS];
-	char *opts, *p;
+	char *opts, *orig, *p;
 	int error = 0;
 	int intarg;
 
@@ -397,6 +397,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 	opts = kstrdup(options, GFP_KERNEL);
 	if (!opts)
 		return -ENOMEM;
+	orig = opts;
 
 	while ((p = strsep(&opts, ",")) != NULL) {
 		int token;
@@ -432,7 +433,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 	}
 
 out_free_opts:
-	kfree(opts);
+	kfree(orig);
 out:
 	/*
 	 * If no subvolume name is specified we use the default one. Allocate
@@ -623,6 +624,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 	btrfs_wait_ordered_extents(root, 0, 0);
 
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	ret = btrfs_commit_transaction(trans, root);
 	return ret;
 }
@@ -761,6 +764,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 		}
 
 		btrfs_close_devices(fs_devices);
+		kfree(fs_info);
+		kfree(tree_root);
 	} else {
 		char b[BDEVNAME_SIZE];
 
@@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 	INIT_DELAYED_WORK(&ac->work, do_async_commit);
 	ac->root = root;
 	ac->newtrans = btrfs_join_transaction(root, 0);
+	if (IS_ERR(ac->newtrans)) {
+		int err = PTR_ERR(ac->newtrans);
+		kfree(ac);
+		return err;
+	}
 
 	/* take transaction reference */
 	mutex_lock(&root->fs_info->trans_mutex);
@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
|
||||||
}
|
}
|
||||||
dst_copy = kmalloc(item_size, GFP_NOFS);
|
dst_copy = kmalloc(item_size, GFP_NOFS);
|
||||||
src_copy = kmalloc(item_size, GFP_NOFS);
|
src_copy = kmalloc(item_size, GFP_NOFS);
|
||||||
|
if (!dst_copy || !src_copy) {
|
||||||
|
btrfs_release_path(root, path);
|
||||||
|
kfree(dst_copy);
|
||||||
|
kfree(src_copy);
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
read_extent_buffer(eb, src_copy, src_ptr, item_size);
|
read_extent_buffer(eb, src_copy, src_ptr, item_size);
|
||||||
|
|
||||||
|
@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
|
||||||
btrfs_dir_item_key_to_cpu(leaf, di, &location);
|
btrfs_dir_item_key_to_cpu(leaf, di, &location);
|
||||||
name_len = btrfs_dir_name_len(leaf, di);
|
name_len = btrfs_dir_name_len(leaf, di);
|
||||||
name = kmalloc(name_len, GFP_NOFS);
|
name = kmalloc(name_len, GFP_NOFS);
|
||||||
|
if (!name)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
|
read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
|
||||||
btrfs_release_path(root, path);
|
btrfs_release_path(root, path);
|
||||||
|
|
||||||
|
@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log,
|
||||||
int match = 0;
|
int match = 0;
|
||||||
|
|
||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
|
if (!path)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
|
ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
|
||||||
if (ret != 0)
|
if (ret != 0)
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
|
||||||
key.offset = (u64)-1;
|
key.offset = (u64)-1;
|
||||||
|
|
||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
|
if (!path)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
|
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
|
||||||
|
@ -1178,6 +1192,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
|
||||||
|
|
||||||
name_len = btrfs_dir_name_len(eb, di);
|
name_len = btrfs_dir_name_len(eb, di);
|
||||||
name = kmalloc(name_len, GFP_NOFS);
|
name = kmalloc(name_len, GFP_NOFS);
|
||||||
|
if (!name)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
log_type = btrfs_dir_type(eb, di);
|
log_type = btrfs_dir_type(eb, di);
|
||||||
read_extent_buffer(eb, name, (unsigned long)(di + 1),
|
read_extent_buffer(eb, name, (unsigned long)(di + 1),
|
||||||
name_len);
|
name_len);
|
||||||
|
@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
|
||||||
root_owner = btrfs_header_owner(parent);
|
root_owner = btrfs_header_owner(parent);
|
||||||
|
|
||||||
next = btrfs_find_create_tree_block(root, bytenr, blocksize);
|
next = btrfs_find_create_tree_block(root, bytenr, blocksize);
|
||||||
|
if (!next)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
if (*level == 1) {
|
if (*level == 1) {
|
||||||
wc->process_func(root, next, wc, ptr_gen);
|
wc->process_func(root, next, wc, ptr_gen);
|
||||||
|
@ -2032,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
|
||||||
wait_log_commit(trans, log_root_tree,
|
wait_log_commit(trans, log_root_tree,
|
||||||
log_root_tree->log_transid);
|
log_root_tree->log_transid);
|
||||||
mutex_unlock(&log_root_tree->log_mutex);
|
mutex_unlock(&log_root_tree->log_mutex);
|
||||||
|
ret = 0;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
atomic_set(&log_root_tree->log_commit[index2], 1);
|
atomic_set(&log_root_tree->log_commit[index2], 1);
|
||||||
|
@ -2096,7 +2116,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
|
||||||
smp_mb();
|
smp_mb();
|
||||||
if (waitqueue_active(&root->log_commit_wait[index1]))
|
if (waitqueue_active(&root->log_commit_wait[index1]))
|
||||||
wake_up(&root->log_commit_wait[index1]);
|
wake_up(&root->log_commit_wait[index1]);
|
||||||
return 0;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void free_log_tree(struct btrfs_trans_handle *trans,
|
static void free_log_tree(struct btrfs_trans_handle *trans,
|
||||||
|
@ -2194,6 +2214,9 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
|
||||||
|
|
||||||
log = root->log_root;
|
log = root->log_root;
|
||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
|
if (!path)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
|
di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
|
||||||
name, name_len, -1);
|
name, name_len, -1);
|
||||||
if (IS_ERR(di)) {
|
if (IS_ERR(di)) {
|
||||||
|
@ -2594,6 +2617,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
|
||||||
|
|
||||||
ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
|
ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
|
||||||
nr * sizeof(u32), GFP_NOFS);
|
nr * sizeof(u32), GFP_NOFS);
|
||||||
|
if (!ins_data)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
ins_sizes = (u32 *)ins_data;
|
ins_sizes = (u32 *)ins_data;
|
||||||
ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
|
ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
|
||||||
|
|
||||||
|
@@ -2725,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
     log = root->log_root;
 
     path = btrfs_alloc_path();
+    if (!path)
+        return -ENOMEM;
     dst_path = btrfs_alloc_path();
+    if (!dst_path) {
+        btrfs_free_path(path);
+        return -ENOMEM;
+    }
 
     min_key.objectid = inode->i_ino;
     min_key.type = BTRFS_INODE_ITEM_KEY;

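Where two resources are allocated back to back, as in the btrfs_log_inode() hunk above, the second failure has to unwind the first, which is what the added btrfs_free_path() call does. A minimal userspace sketch of the same unwind pattern, with hypothetical names and plain malloc/free in place of the path allocator:

#include <errno.h>
#include <stdlib.h>

struct path_buf { char scratch[256]; };

/* Paired allocation with unwind: if the second allocation fails, the
 * first one must be released before returning the error. */
static int alloc_path_pair(struct path_buf **a, struct path_buf **b)
{
    *a = malloc(sizeof(**a));
    if (!*a)
        return -ENOMEM;

    *b = malloc(sizeof(**b));
    if (!*b) {
        free(*a);      /* unwind the earlier allocation */
        *a = NULL;
        return -ENOMEM;
    }
    return 0;
}
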
@@ -3080,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
     BUG_ON(!path);
 
     trans = btrfs_start_transaction(fs_info->tree_root, 0);
+    BUG_ON(IS_ERR(trans));
 
     wc.trans = trans;
     wc.pin = 1;

@@ -1213,6 +1213,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
         return -ENOMEM;
 
     trans = btrfs_start_transaction(root, 0);
+    if (IS_ERR(trans)) {
+        btrfs_free_path(path);
+        return PTR_ERR(trans);
+    }
     key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
     key.type = BTRFS_DEV_ITEM_KEY;
     key.offset = device->devid;

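These volume-management hunks all hinge on the same convention: btrfs_start_transaction() reports failure through an ERR_PTR-encoded pointer, not NULL, so callers must test with IS_ERR() and decode with PTR_ERR(). The kernel's helpers live in include/linux/err.h; the sketch below re-implements the idea in a self-contained userspace form purely for illustration, with a hypothetical start_transaction() stub:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Minimal re-implementation of the ERR_PTR convention: small negative
 * errno values are packed into the top of the address space. */
static inline void *err_ptr(long err)      { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical transaction starter used only for this sketch. */
static void *start_transaction(int fail)
{
    return fail ? err_ptr(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
    void *trans = start_transaction(1);

    if (is_err(trans)) {           /* a plain NULL check would miss this */
        printf("start failed: %ld\n", ptr_err(trans));
        return 1;
    }
    return 0;
}
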
@@ -1606,6 +1610,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
     }
 
     trans = btrfs_start_transaction(root, 0);
+    if (IS_ERR(trans)) {
+        kfree(device);
+        ret = PTR_ERR(trans);
+        goto error;
+    }
+
     lock_chunks(root);
 
     device->writeable = 1;

@@ -1873,7 +1883,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
         return ret;
 
     trans = btrfs_start_transaction(root, 0);
-    BUG_ON(!trans);
+    BUG_ON(IS_ERR(trans));
 
     lock_chunks(root);
 

@@ -2047,7 +2057,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
         BUG_ON(ret);
 
         trans = btrfs_start_transaction(dev_root, 0);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));
 
         ret = btrfs_grow_device(trans, device, old_size);
         BUG_ON(ret);

@@ -2213,6 +2223,11 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 
     /* Shrinking succeeded, else we would be at "done". */
     trans = btrfs_start_transaction(root, 0);
+    if (IS_ERR(trans)) {
+        ret = PTR_ERR(trans);
+        goto done;
+    }
+
     lock_chunks(root);
 
     device->disk_total_bytes = new_size;

@@ -372,6 +372,10 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
 
         ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
                 GFP_KERNEL);
+        if (!ppace) {
+            cERROR(1, "DACL memory allocation error");
+            return;
+        }
 
         for (i = 0; i < num_aces; ++i) {
             ppace[i] = (struct cifs_ace *) (acl_base + acl_size);

@@ -136,9 +136,6 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
         }
     }
 
-    if (ses->status == CifsExiting)
-        return -EIO;
-
     /*
      * Give demultiplex thread up to 10 seconds to reconnect, should be
      * greater than cifs socket timeout which is 7 seconds

@@ -156,7 +153,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
      * retrying until process is killed or server comes
      * back on-line
      */
-    if (!tcon->retry || ses->status == CifsExiting) {
+    if (!tcon->retry) {
         cFYI(1, "gave up waiting on reconnect in smb_init");
         return -EHOSTDOWN;
     }

@@ -337,8 +337,12 @@ cifs_echo_request(struct work_struct *work)
     struct TCP_Server_Info *server = container_of(work,
                     struct TCP_Server_Info, echo.work);
 
-    /* no need to ping if we got a response recently */
-    if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+    /*
+     * We cannot send an echo until the NEGOTIATE_PROTOCOL request is done.
+     * Also, no need to ping if we got a response recently
+     */
+    if (server->tcpStatus != CifsGood ||
+        time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
         goto requeue_echo;
 
     rc = CIFSSMBEcho(server);

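The echo hunk adds a connection-state guard in front of the existing rate limit: no ping before the session is negotiated, and no ping if the server answered recently. A standalone sketch of the same two-part test, with hypothetical names and a plain monotonic timestamp replacing jiffies/time_before:

#include <stdbool.h>
#include <time.h>

#define ECHO_INTERVAL_SEC 60

enum conn_status { CONN_NEW, CONN_GOOD, CONN_EXITING };

struct server_state {
    enum conn_status status;
    time_t last_response;   /* stands in for server->lstrp */
};

/* Only ping when the session is fully negotiated AND we have not heard
 * from the server recently, mirroring the two conditions in the hunk. */
static bool should_send_echo(const struct server_state *s, time_t now)
{
    if (s->status != CONN_GOOD)
        return false;
    return now - s->last_response >= ECHO_INTERVAL_SEC - 1;
}
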
@@ -578,12 +582,12 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
         else if (reconnect == 1)
             continue;
 
-        length += 4; /* account for rfc1002 hdr */
+        total_read += 4; /* account for rfc1002 hdr */
 
-        dump_smb(smb_buffer, length);
-        if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
-            cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
+        dump_smb(smb_buffer, total_read);
+        if (checkSMB(smb_buffer, smb_buffer->Mid, total_read)) {
+            cifs_dump_mem("Bad SMB: ", smb_buffer,
+                      total_read < 48 ? total_read : 48);
             continue;
         }
 

@@ -633,11 +637,11 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
                 mid_entry->largeBuf = isLargeBuf;
 multi_t2_fnd:
                 mid_entry->midState = MID_RESPONSE_RECEIVED;
-                list_del_init(&mid_entry->qhead);
-                mid_entry->callback(mid_entry);
 #ifdef CONFIG_CIFS_STATS2
                 mid_entry->when_received = jiffies;
 #endif
+                list_del_init(&mid_entry->qhead);
+                mid_entry->callback(mid_entry);
                 break;
             }
             mid_entry = NULL;

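The reordering above finishes the per-request bookkeeping (the CONFIG_CIFS_STATS2 timestamp) before the entry is unlinked and its completion callback runs, since the callback may free or recycle the entry. A minimal sketch of that ordering constraint, using a hypothetical request structure rather than the real mid entry:

#include <stdlib.h>
#include <time.h>

struct request {
    time_t when_received;               /* bookkeeping owned by this code */
    void (*callback)(struct request *);
};

/* Complete a request: finish every access to *req BEFORE invoking the
 * callback, because the callback is allowed to free the request. */
static void complete_request(struct request *req)
{
    req->when_received = time(NULL);    /* must happen first */
    req->callback(req);                 /* may free req; do not touch it after */
}
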
@@ -1662,10 +1662,10 @@ static ssize_t
 cifs_iovec_write(struct file *file, const struct iovec *iov,
          unsigned long nr_segs, loff_t *poffset)
 {
-    size_t total_written = 0;
-    unsigned int written = 0;
-    unsigned long num_pages, npages;
-    size_t copied, len, cur_len, i;
+    unsigned int written;
+    unsigned long num_pages, npages, i;
+    size_t copied, len, cur_len;
+    ssize_t total_written = 0;
     struct kvec *to_send;
     struct page **pages;
     struct iov_iter it;

@@ -1821,7 +1821,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
 {
     int rc;
     int xid;
-    unsigned int total_read, bytes_read = 0;
+    ssize_t total_read;
+    unsigned int bytes_read = 0;
     size_t len, cur_len;
     int iov_offset = 0;
     struct cifs_sb_info *cifs_sb;

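The declaration changes in cifs_iovec_write()/cifs_iovec_read() move the running totals to ssize_t so a negative errno can travel back through the same variable that otherwise carries a byte count; with an unsigned total, an error such as -EFAULT silently becomes a huge positive length. A small POSIX userspace illustration of the pitfall, using a hypothetical helper:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

/* Returns bytes consumed on success or a negative errno on failure.
 * With an unsigned return type the error would wrap to a huge count. */
static ssize_t consume(int fail, size_t len)
{
    if (fail)
        return -EFAULT;
    return (ssize_t)len;
}

int main(void)
{
    ssize_t total_read = consume(1, 4096);

    if (total_read < 0)            /* only works because the type is signed */
        printf("error: %zd\n", total_read);
    return 0;
}
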
@@ -359,6 +359,10 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
     if (rc)
         return rc;
 
+    /* enable signing if server requires it */
+    if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+        in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
     mutex_lock(&server->srv_mutex);
     mid = AllocMidQEntry(in_buf, server);
     if (mid == NULL) {

@@ -252,13 +252,13 @@ struct cred *cred_alloc_blank(void)
 #endif
 
     atomic_set(&new->usage, 1);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+    new->magic = CRED_MAGIC;
+#endif
 
     if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
         goto error;
 
-#ifdef CONFIG_DEBUG_CREDENTIALS
-    new->magic = CRED_MAGIC;
-#endif
     return new;
 
 error:

@@ -657,6 +657,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
     validate_creds(old);
 
     *new = *old;
+    atomic_set(&new->usage, 1);
+    set_cred_subscribers(new, 0);
     get_uid(new->user);
     get_group_info(new->group_info);
 

@@ -674,8 +676,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
     if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
         goto error;
 
-    atomic_set(&new->usage, 1);
-    set_cred_subscribers(new, 0);
     put_cred(old);
     validate_creds(new);
     return new;

@@ -748,7 +748,11 @@ bool creds_are_invalid(const struct cred *cred)
     if (cred->magic != CRED_MAGIC)
         return true;
 #ifdef CONFIG_SECURITY_SELINUX
-    if (selinux_is_enabled()) {
+    /*
+     * cred->security == NULL if security_cred_alloc_blank() or
+     * security_prepare_creds() returned an error.
+     */
+    if (selinux_is_enabled() && cred->security) {
         if ((unsigned long) cred->security < PAGE_SIZE)
             return true;
         if ((*(u32 *)cred->security & 0xffffff00) ==

@@ -3198,7 +3198,11 @@ static void selinux_cred_free(struct cred *cred)
 {
     struct task_security_struct *tsec = cred->security;
 
-    BUG_ON((unsigned long) cred->security < PAGE_SIZE);
+    /*
+     * cred->security == NULL if security_cred_alloc_blank() or
+     * security_prepare_creds() returned an error.
+     */
+    BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
     cred->security = (void *) 0x7UL;
     kfree(tsec);
 }

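The credential hunks share one theme: after security_cred_alloc_blank() or security_prepare_creds() fails, cred->security is NULL, so both the validator and the SELinux destructor must tolerate that instead of tripping a sanity check. A compact userspace sketch of a NULL-tolerant teardown, with assert/free in place of BUG_ON/kfree and a hypothetical blob type; the 4096 threshold and the 0x7 poison value simply echo the kernel's PAGE_SIZE check and 0x7UL marker:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct sec_blob { uint32_t sid; };

/* Teardown that accepts a NULL blob (the allocation hook failed earlier)
 * but still catches obviously bogus small-integer "pointers". */
static void sec_blob_free(struct sec_blob **blobp)
{
    struct sec_blob *blob = *blobp;

    assert(blob == NULL || (uintptr_t)blob >= 4096);  /* guarded sanity check */
    *blobp = (struct sec_blob *)0x7;                  /* poison the slot */
    free(blob);                                       /* free(NULL) is a no-op */
}
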
@@ -50,7 +50,11 @@ static void aaci_ac97_select_codec(struct aaci *aaci, struct snd_ac97 *ac97)
     if (v & SLFR_1RXV)
         readl(aaci->base + AACI_SL1RX);
 
-    writel(maincr, aaci->base + AACI_MAINCR);
+    if (maincr != readl(aaci->base + AACI_MAINCR)) {
+        writel(maincr, aaci->base + AACI_MAINCR);
+        readl(aaci->base + AACI_MAINCR);
+        udelay(1);
+    }
 }
 
 /*

@@ -993,6 +997,8 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
      * disabling the channel doesn't clear the FIFO.
      */
     writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR);
+    readl(aaci->base + AACI_MAINCR);
+    udelay(1);
     writel(aaci->maincr, aaci->base + AACI_MAINCR);
 
     /*

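Both AACI hunks follow the same MMIO discipline: skip the MAINCR write when the value is already current, and when a write does go out, read the register back and wait a microsecond so the posted write reaches the hardware before the code relies on it. A hedged sketch of that helper, with hypothetical names and a plain volatile pointer plus a stub delay standing in for writel/readl/udelay:

#include <stdint.h>

/* Stand-ins for the kernel's readl/writel/udelay, for illustration only. */
static inline uint32_t mmio_read(volatile uint32_t *reg)              { return *reg; }
static inline void     mmio_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }
static void            tiny_delay(void) { for (volatile int i = 0; i < 100; i++) ; }

/* Update a control register only when needed, then flush the posted write
 * by reading it back and giving the hardware a moment to settle. */
static void update_maincr(volatile uint32_t *maincr_reg, uint32_t val)
{
    if (mmio_read(maincr_reg) == val)
        return;                 /* avoid a needless, disruptive rewrite */

    mmio_write(maincr_reg, val);
    mmio_read(maincr_reg);      /* read-back forces the write to post */
    tiny_delay();
}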