57 files changed, 708 insertions, 266 deletions
@@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 38 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index ffdf87be295..82079545adc 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -838,7 +838,7 @@ EXPORT_SYMBOL(ep93xx_i2s_release); static struct resource ep93xx_ac97_resources[] = { { .start = EP93XX_AAC_PHYS_BASE, - .end = EP93XX_AAC_PHYS_BASE + 0xb0 - 1, + .end = EP93XX_AAC_PHYS_BASE + 0xac - 1, .flags = IORESOURCE_MEM, }, { diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c index aa76cfd9f34..8382e790207 100644 --- a/arch/arm/mach-imx/mach-mx25_3ds.c +++ b/arch/arm/mach-imx/mach-mx25_3ds.c @@ -180,7 +180,7 @@ static const uint32_t mx25pdk_keymap[] = { KEY(3, 3, KEY_POWER), }; -static const struct matrix_keymap_data mx25pdk_keymap_data __initdata = { +static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = { .keymap = mx25pdk_keymap, .keymap_size = ARRAY_SIZE(mx25pdk_keymap), }; diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c index b1a362ebfde..ca72a05ed9c 100644 --- a/arch/arm/mach-mxs/clock-mx23.c +++ b/arch/arm/mach-mxs/clock-mx23.c @@ -304,7 +304,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ reg &= ~BM_CLKCTRL_##dr##_DIV; \ reg |= div << BP_CLKCTRL_##dr##_DIV; \ - if (reg | (1 << clk->enable_shift)) { \ + if (reg & (1 << clk->enable_shift)) { \ pr_err("%s: clock is gated\n", __func__); \ return -EINVAL; \ } \ @@ -347,7 +347,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \ { \ if (parent != clk->parent) { \ __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \ - HW_CLKCTRL_CLKSEQ_TOG); \ + CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \ clk->parent = parent; \ } \ \ diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c index 56312c092a9..fd1c4c54b8e 100644 --- a/arch/arm/mach-mxs/clock-mx28.c +++ b/arch/arm/mach-mxs/clock-mx28.c @@ -355,12 +355,12 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ } else { \ reg &= ~BM_CLKCTRL_##dr##_DIV; \ reg |= div << BP_CLKCTRL_##dr##_DIV; \ - if (reg | (1 << clk->enable_shift)) { \ + if (reg & (1 << clk->enable_shift)) { \ pr_err("%s: clock is gated\n", __func__); \ return -EINVAL; \ } \ } \ - __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU); \ + __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ \ for (i = 10000; i; i--) \ if (!(__raw_readl(CLKCTRL_BASE_ADDR + \ @@ -483,7 +483,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \ { \ if (parent != clk->parent) { \ __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \ - HW_CLKCTRL_CLKSEQ_TOG); \ + CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \ clk->parent = parent; \ } \ \ @@ -609,7 +609,6 @@ static struct clk_lookup lookups[] = { _REGISTER_CLOCK("duart", NULL, uart_clk) _REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk) _REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk) - _REGISTER_CLOCK("fec.0", NULL, fec_clk) _REGISTER_CLOCK("rtc", NULL, rtc_clk) _REGISTER_CLOCK("pll2", NULL, pll2_clk) _REGISTER_CLOCK(NULL, "hclk", hbus_clk) diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c index e7d2269cf70..a7093c88e6a 100644 --- a/arch/arm/mach-mxs/clock.c +++ b/arch/arm/mach-mxs/clock.c @@ -57,7 +57,6 @@ static void __clk_disable(struct clk *clk) if (clk->disable) clk->disable(clk); __clk_disable(clk->parent); - 
__clk_disable(clk->secondary); } } @@ -68,7 +67,6 @@ static int __clk_enable(struct clk *clk) if (clk->usecount++ == 0) { __clk_enable(clk->parent); - __clk_enable(clk->secondary); if (clk->enable) clk->enable(clk); diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c index d7ad7a61366..cb0c0e83a52 100644 --- a/arch/arm/mach-mxs/gpio.c +++ b/arch/arm/mach-mxs/gpio.c @@ -139,6 +139,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq); u32 gpio_irq_no_base = port->virtual_irq_start; + desc->irq_data.chip->irq_ack(&desc->irq_data); + irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) & __raw_readl(port->base + PINCTRL_IRQEN(port->id)); diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h index 041e276d8a3..592c9ab5d76 100644 --- a/arch/arm/mach-mxs/include/mach/clock.h +++ b/arch/arm/mach-mxs/include/mach/clock.h @@ -29,8 +29,6 @@ struct clk { int id; /* Source clock this clk depends on */ struct clk *parent; - /* Secondary clock to enable/disable with this clock */ - struct clk *secondary; /* Reference count of clock enable/disable */ __s8 usecount; /* Register bit position for clock's enable/disable control. */ diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c index c9088d85da0..453809359ba 100644 --- a/arch/arm/mach-omap1/lcd_dma.c +++ b/arch/arm/mach-omap1/lcd_dma.c @@ -37,7 +37,7 @@ int omap_lcd_dma_running(void) * On OMAP1510, internal LCD controller will start the transfer * when it gets enabled, so assume DMA running if LCD enabled. */ - if (cpu_is_omap1510()) + if (cpu_is_omap15xx()) if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN) return 1; @@ -95,7 +95,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer); void omap_set_lcd_dma_b1_rotation(int rotate) { - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n"); BUG(); return; @@ -106,7 +106,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation); void omap_set_lcd_dma_b1_mirror(int mirror) { - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n"); BUG(); } @@ -116,7 +116,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror); void omap_set_lcd_dma_b1_vxres(unsigned long vxres) { - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA virtual resulotion is not supported " "in 1510 mode\n"); BUG(); @@ -127,7 +127,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres); void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale) { - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA scale is not supported in 1510 mode\n"); BUG(); } @@ -177,7 +177,7 @@ static void set_b1_regs(void) bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); /* 1510 DMA requires the bottom address to be 2 more * than the actual last memory access location. 
*/ - if (cpu_is_omap1510() && + if (cpu_is_omap15xx() && lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32) bottom += 2; ei = PIXSTEP(0, 0, 1, 0); @@ -241,7 +241,7 @@ static void set_b1_regs(void) return; /* Suppress warning about uninitialized vars */ } - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U); omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L); omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U); @@ -343,7 +343,7 @@ void omap_free_lcd_dma(void) BUG(); return; } - if (!cpu_is_omap1510()) + if (!cpu_is_omap15xx()) omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR); lcd_dma.reserved = 0; @@ -360,7 +360,7 @@ void omap_enable_lcd_dma(void) * connected. Otherwise the OMAP internal controller will * start the transfer when it gets enabled. */ - if (cpu_is_omap1510() || !lcd_dma.ext_ctrl) + if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CTRL); @@ -378,14 +378,14 @@ EXPORT_SYMBOL(omap_enable_lcd_dma); void omap_setup_lcd_dma(void) { BUG_ON(lcd_dma.active); - if (!cpu_is_omap1510()) { + if (!cpu_is_omap15xx()) { /* Set some reasonable defaults */ omap_writew(0x5440, OMAP1610_DMA_LCD_CCR); omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP); omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL); } set_b1_regs(); - if (!cpu_is_omap1510()) { + if (!cpu_is_omap15xx()) { u16 w; w = omap_readw(OMAP1610_DMA_LCD_CCR); @@ -407,7 +407,7 @@ void omap_stop_lcd_dma(void) u16 w; lcd_dma.active = 0; - if (cpu_is_omap1510() || !lcd_dma.ext_ctrl) + if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CCR); diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index f83fc335c61..6885d2fac18 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -44,7 +44,6 @@ #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/io.h> -#include <linux/sched.h> #include <asm/system.h> #include <mach/hardware.h> diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index e906e05bb41..9a2a31e011c 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c @@ -115,9 +115,6 @@ static struct omap2_hsmmc_info mmc[] = { static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev) { - twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1); - twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0); - if (gpio_is_valid(dssdev->reset_gpio)) gpio_set_value_cansleep(dssdev->reset_gpio, 1); return 0; @@ -247,6 +244,8 @@ static struct gpio_led gpio_leds[]; static int devkit8000_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { + int ret; + omap_mux_init_gpio(29, OMAP_PIN_INPUT); /* gpio + 0 is "mmc0_cd" (input/IRQ) */ mmc[0].gpio_cd = gpio + 0; @@ -255,17 +254,23 @@ static int devkit8000_twl_gpio_setup(struct device *dev, /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; - /* gpio + 1 is "LCD_PWREN" (out, active high) */ - devkit8000_lcd_device.reset_gpio = gpio + 1; - gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN"); - /* Disable until needed */ - gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0); + /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */ + devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0; + ret = gpio_request_one(devkit8000_lcd_device.reset_gpio, + GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN"); + if (ret < 0) { + devkit8000_lcd_device.reset_gpio = 
-EINVAL; + printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n"); + } /* gpio + 7 is "DVI_PD" (out, active low) */ devkit8000_dvi_device.reset_gpio = gpio + 7; - gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown"); - /* Disable until needed */ - gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0); + ret = gpio_request_one(devkit8000_dvi_device.reset_gpio, + GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown"); + if (ret < 0) { + devkit8000_dvi_device.reset_gpio = -EINVAL; + printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n"); + } return 0; } diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index e001a048dc0..e944025d5ef 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -409,8 +409,6 @@ static void __init omap4_panda_init(void) platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); omap_serial_init(); omap4_twl6030_hsmmc_init(mmc); - /* OMAP4 Panda uses internal transceiver so register nop transceiver */ - usb_nop_xceiv_register(); omap4_ehci_init(); usb_musb_init(&musb_board_data); } diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c index cb77be7ac44..39a71bb8a30 100644 --- a/arch/arm/mach-omap2/board-rm680.c +++ b/arch/arm/mach-omap2/board-rm680.c @@ -40,9 +40,6 @@ static struct regulator_consumer_supply rm680_vemmc_consumers[] = { static struct regulator_init_data rm680_vemmc = { .constraints = { .name = "rm680_vemmc", - .min_uV = 2900000, - .max_uV = 2900000, - .apply_uV = 1, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_STATUS diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index fae49d12bc7..98148b6c36e 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -1000,6 +1000,7 @@ int __init omap_mux_init(const char *name, u32 flags, if (!partition->base) { pr_err("%s: Could not ioremap mux partition at 0x%08x\n", __func__, partition->phys); + kfree(partition); return -ENODEV; } diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index a4aa1920a75..2f864e4b085 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -168,9 +168,10 @@ static void omap3_core_restore_context(void) * once during boot sequence, but this works as we are not using secure * services. 
*/ -static void omap3_save_secure_ram_context(u32 target_mpu_state) +static void omap3_save_secure_ram_context(void) { u32 ret; + int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); if (omap_type() != OMAP2_DEVICE_TYPE_GP) { /* @@ -181,7 +182,7 @@ static void omap3_save_secure_ram_context(u32 target_mpu_state) pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); ret = _omap_save_secure_sram((u32 *) __pa(omap3_secure_ram_storage)); - pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state); + pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); /* Following is for error tracking, it should not happen */ if (ret) { printk(KERN_ERR "save_secure_sram() returns %08x\n", @@ -1094,7 +1095,7 @@ static int __init omap3_pm_init(void) local_fiq_disable(); omap_dma_global_context_save(); - omap3_save_secure_ram_context(PWRDM_POWER_ON); + omap3_save_secure_ram_context(); omap_dma_global_context_restore(); local_irq_enable(); diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index 77ecebf3fae..c37e823266d 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c @@ -780,8 +780,7 @@ static int omap_sr_autocomp_show(void *data, u64 *val) struct omap_sr *sr_info = (struct omap_sr *) data; if (!sr_info) { - pr_warning("%s: omap_sr struct for sr_%s not found\n", - __func__, sr_info->voltdm->name); + pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } @@ -795,8 +794,7 @@ static int omap_sr_autocomp_store(void *data, u64 val) struct omap_sr *sr_info = (struct omap_sr *) data; if (!sr_info) { - pr_warning("%s: omap_sr struct for sr_%s not found\n", - __func__, sr_info->voltdm->name); + pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } @@ -834,7 +832,8 @@ static int __init omap_sr_probe(struct platform_device *pdev) if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); - return -EINVAL; + ret = -EINVAL; + goto err_free_devinfo; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -966,7 +965,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev) } sr_info = _sr_lookup(pdata->voltdm); - if (!sr_info) { + if (IS_ERR(sr_info)) { dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", __func__); return -EINVAL; diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c index ed6079c94c5..12be525b8df 100644 --- a/arch/arm/mach-omap2/voltage.c +++ b/arch/arm/mach-omap2/voltage.c @@ -471,6 +471,7 @@ static void __init vdd_debugfs_init(struct omap_vdd_info *vdd) strcat(name, vdd->voltdm.name); vdd->debug_dir = debugfs_create_dir(name, voltage_dir); + kfree(name); if (IS_ERR(vdd->debug_dir)) { pr_warning("%s: Unable to create debugfs directory for" " vdd_%s\n", __func__, vdd->voltdm.name); diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h index 3a70ebf0477..ff469c4f1d7 100644 --- a/arch/arm/plat-mxc/include/mach/uncompress.h +++ b/arch/arm/plat-mxc/include/mach/uncompress.h @@ -95,6 +95,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id) case MACH_TYPE_MX35_3DS: case MACH_TYPE_PCM043: case MACH_TYPE_LILLY1131: + case MACH_TYPE_VPR200: uart_base = MX3X_UART1_BASE_ADDR; break; case MACH_TYPE_MAGX_ZN5: @@ -102,6 +103,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id) break; case MACH_TYPE_MX51_BABBAGE: case MACH_TYPE_EUKREA_CPUIMX51SD: + case MACH_TYPE_MX51_3DS: uart_base = MX51_UART1_BASE_ADDR; break; case MACH_TYPE_MX50_RDP: diff --git a/arch/arm/tools/mach-types 
b/arch/arm/tools/mach-types index 2fea897ebeb..9d6feaabbe7 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -12,7 +12,7 @@ # # http://www.arm.linux.org.uk/developer/machines/?action=new # -# Last update: Sun Dec 12 23:24:27 2010 +# Last update: Mon Feb 7 08:59:27 2011 # # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # @@ -2240,7 +2240,7 @@ arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250 vs_v210 MACH_VS_V210 VS_V210 2252 vs_v212 MACH_VS_V212 VS_V212 2253 hmt MACH_HMT HMT 2254 -suen3 MACH_SUEN3 SUEN3 2255 +km_kirkwood MACH_KM_KIRKWOOD KM_KIRKWOOD 2255 vesper MACH_VESPER VESPER 2256 str9 MACH_STR9 STR9 2257 omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258 @@ -2987,7 +2987,7 @@ pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001 ea20 MACH_EA20 EA20 3002 awm2 MACH_AWM2 AWM2 3003 ti8148evm MACH_TI8148EVM TI8148EVM 3004 -tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005 +seaboard MACH_SEABOARD SEABOARD 3005 linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006 tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007 rubys MACH_RUBYS RUBYS 3008 @@ -3190,7 +3190,7 @@ synergy MACH_SYNERGY SYNERGY 3205 ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206 wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 punica MACH_PUNICA PUNICA 3208 -sbc_nt250 MACH_SBC_NT250 SBC_NT250 3209 +trimslice MACH_TRIMSLICE TRIMSLICE 3209 mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210 mackerel MACH_MACKEREL MACKEREL 3211 fa9x27 MACH_FA9X27 FA9X27 3213 @@ -3219,3 +3219,100 @@ pivicc MACH_PIVICC PIVICC 3235 pcm048 MACH_PCM048 PCM048 3236 dds MACH_DDS DDS 3237 chalten_xa1 MACH_CHALTEN_XA1 CHALTEN_XA1 3238 +ts48xx MACH_TS48XX TS48XX 3239 +tonga2_tfttimer MACH_TONGA2_TFTTIMER TONGA2_TFTTIMER 3240 +whistler MACH_WHISTLER WHISTLER 3241 +asl_phoenix MACH_ASL_PHOENIX ASL_PHOENIX 3242 +at91sam9263otlite MACH_AT91SAM9263OTLITE AT91SAM9263OTLITE 3243 +ddplug MACH_DDPLUG DDPLUG 3244 +d2plug MACH_D2PLUG D2PLUG 3245 +kzm9d MACH_KZM9D KZM9D 3246 +verdi_lte MACH_VERDI_LTE VERDI_LTE 3247 +nanozoom MACH_NANOZOOM NANOZOOM 3248 +dm3730_som_lv MACH_DM3730_SOM_LV DM3730_SOM_LV 3249 +dm3730_torpedo MACH_DM3730_TORPEDO DM3730_TORPEDO 3250 +anchovy MACH_ANCHOVY ANCHOVY 3251 +re2rev20 MACH_RE2REV20 RE2REV20 3253 +re2rev21 MACH_RE2REV21 RE2REV21 3254 +cns21xx MACH_CNS21XX CNS21XX 3255 +rider MACH_RIDER RIDER 3257 +nsk330 MACH_NSK330 NSK330 3258 +cns2133evb MACH_CNS2133EVB CNS2133EVB 3259 +z3_816x_mod MACH_Z3_816X_MOD Z3_816X_MOD 3260 +z3_814x_mod MACH_Z3_814X_MOD Z3_814X_MOD 3261 +beect MACH_BEECT BEECT 3262 +dma_thunderbug MACH_DMA_THUNDERBUG DMA_THUNDERBUG 3263 +omn_at91sam9g20 MACH_OMN_AT91SAM9G20 OMN_AT91SAM9G20 3264 +mx25_e2s_uc MACH_MX25_E2S_UC MX25_E2S_UC 3265 +mione MACH_MIONE MIONE 3266 +top9000_tcu MACH_TOP9000_TCU TOP9000_TCU 3267 +top9000_bsl MACH_TOP9000_BSL TOP9000_BSL 3268 +kingdom MACH_KINGDOM KINGDOM 3269 +armadillo460 MACH_ARMADILLO460 ARMADILLO460 3270 +lq2 MACH_LQ2 LQ2 3271 +sweda_tms2 MACH_SWEDA_TMS2 SWEDA_TMS2 3272 +mx53_loco MACH_MX53_LOCO MX53_LOCO 3273 +acer_a8 MACH_ACER_A8 ACER_A8 3275 +acer_gauguin MACH_ACER_GAUGUIN ACER_GAUGUIN 3276 +guppy MACH_GUPPY GUPPY 3277 +mx61_ard MACH_MX61_ARD MX61_ARD 3278 +tx53 MACH_TX53 TX53 3279 +omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280 +uemd MACH_UEMD UEMD 3281 +ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282 +rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283 +nookcolor MACH_NOOKCOLOR NOOKCOLOR 3284 +hkdkc100 MACH_HKDKC100 HKDKC100 3285 +ts42xx MACH_TS42XX TS42XX 3286 +aebl MACH_AEBL AEBL 3287 +wario MACH_WARIO WARIO 3288 +gfs_spm 
MACH_GFS_SPM GFS_SPM 3289 +cm_t3730 MACH_CM_T3730 CM_T3730 3290 +isc3 MACH_ISC3 ISC3 3291 +rascal MACH_RASCAL RASCAL 3292 +hrefv60 MACH_HREFV60 HREFV60 3293 +tpt_2_0 MACH_TPT_2_0 TPT_2_0 3294 +pyramid_td MACH_PYRAMID_TD PYRAMID_TD 3295 +splendor MACH_SPLENDOR SPLENDOR 3296 +guf_planet MACH_GUF_PLANET GUF_PLANET 3297 +msm8x60_qt MACH_MSM8X60_QT MSM8X60_QT 3298 +htc_hd_mini MACH_HTC_HD_MINI HTC_HD_MINI 3299 +athene MACH_ATHENE ATHENE 3300 +deep_r_ek_1 MACH_DEEP_R_EK_1 DEEP_R_EK_1 3301 +vivow_ct MACH_VIVOW_CT VIVOW_CT 3302 +nery_1000 MACH_NERY_1000 NERY_1000 3303 +rfl109145_ssrv MACH_RFL109145_SSRV RFL109145_SSRV 3304 +nmh MACH_NMH NMH 3305 +wn802t MACH_WN802T WN802T 3306 +dragonet MACH_DRAGONET DRAGONET 3307 +geneva_b MACH_GENEVA_B GENEVA_B 3308 +at91sam9263desk16l MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L 3309 +bcmhana_sv MACH_BCMHANA_SV BCMHANA_SV 3310 +bcmhana_tablet MACH_BCMHANA_TABLET BCMHANA_TABLET 3311 +koi MACH_KOI KOI 3312 +ts4800 MACH_TS4800 TS4800 3313 +tqma9263 MACH_TQMA9263 TQMA9263 3314 +holiday MACH_HOLIDAY HOLIDAY 3315 +dma_6410 MACH_DMA6410 DMA6410 3316 +pcats_overlay MACH_PCATS_OVERLAY PCATS_OVERLAY 3317 +hwgw6410 MACH_HWGW6410 HWGW6410 3318 +shenzhou MACH_SHENZHOU SHENZHOU 3319 +cwme9210 MACH_CWME9210 CWME9210 3320 +cwme9210js MACH_CWME9210JS CWME9210JS 3321 +pgs_v1 MACH_PGS_SITARA PGS_SITARA 3322 +colibri_tegra2 MACH_COLIBRI_TEGRA2 COLIBRI_TEGRA2 3323 +w21 MACH_W21 W21 3324 +polysat1 MACH_POLYSAT1 POLYSAT1 3325 +dataway MACH_DATAWAY DATAWAY 3326 +cobral138 MACH_COBRAL138 COBRAL138 3327 +roverpcs8 MACH_ROVERPCS8 ROVERPCS8 3328 +marvelc MACH_MARVELC MARVELC 3329 +navefihid MACH_NAVEFIHID NAVEFIHID 3330 +dm365_cv100 MACH_DM365_CV100 DM365_CV100 3331 +able MACH_ABLE ABLE 3332 +legacy MACH_LEGACY LEGACY 3333 +icong MACH_ICONG ICONG 3334 +rover_g8 MACH_ROVER_G8 ROVER_G8 3335 +t5388p MACH_T5388P T5388P 3336 +dingo MACH_DINGO DINGO 3337 +goflexhome MACH_GOFLEXHOME GOFLEXHOME 3338 diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 8eaed81ea64..17194fcd404 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -40,8 +40,8 @@ /* MAS registers bit definitions */ -#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000) -#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000) +#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) +#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000) #define MAS0_NV(x) ((x) & 0x00000FFF) #define MAS0_HES 0x00004000 #define MAS0_WQ_ALLWAYS 0x00000000 @@ -50,12 +50,12 @@ #define MAS1_VALID 0x80000000 #define MAS1_IPROT 0x40000000 -#define MAS1_TID(x) ((x << 16) & 0x3FFF0000) +#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000) #define MAS1_IND 0x00002000 #define MAS1_TS 0x00001000 #define MAS1_TSIZE_MASK 0x00000f80 #define MAS1_TSIZE_SHIFT 7 -#define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) +#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) #define MAS2_EPN 0xFFFFF000 #define MAS2_X0 0x00000040 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 53b64be40eb..da4b2000854 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr; #ifdef CONFIG_FLATMEM #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT) -#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr)) +#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) #endif #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) diff --git 
a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index 55cba4a8a95..f8cd9fba4d3 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -18,7 +18,7 @@ #include <asm/mmu.h> _GLOBAL(__setup_cpu_603) - mflr r4 + mflr r5 BEGIN_MMU_FTR_SECTION li r10,0 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ @@ -27,60 +27,60 @@ BEGIN_FTR_SECTION bl __init_fpu_registers END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) bl setup_common_caches - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_604) - mflr r4 + mflr r5 bl setup_common_caches bl setup_604_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750cx) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750cx - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750fx) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750fx - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_7400) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_7400_workarounds bl setup_common_caches bl setup_750_7400_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_7410) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_7410_workarounds bl setup_common_caches bl setup_750_7400_hid0 li r3,0 mtspr SPRN_L2CR2,r3 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_745x) - mflr r4 + mflr r5 bl setup_common_caches bl setup_745x_specifics - mtlr r4 + mtlr r5 blr /* Enable caches for 603's, 604, 750 & 7400 */ @@ -194,10 +194,10 @@ setup_750cx: cror 4*cr0+eq,4*cr0+eq,4*cr1+eq cror 4*cr0+eq,4*cr0+eq,4*cr2+eq bnelr - lwz r6,CPU_SPEC_FEATURES(r5) + lwz r6,CPU_SPEC_FEATURES(r4) li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) + stw r6,CPU_SPEC_FEATURES(r4) blr /* 750fx specific @@ -225,12 +225,12 @@ BEGIN_FTR_SECTION andis. r11,r11,L3CR_L3E@h beq 1f END_FTR_SECTION_IFSET(CPU_FTR_L3CR) - lwz r6,CPU_SPEC_FEATURES(r5) + lwz r6,CPU_SPEC_FEATURES(r4) andi. r0,r6,CPU_FTR_L3_DISABLE_NAP beq 1f li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) + stw r6,CPU_SPEC_FEATURES(r4) 1: mfspr r11,SPRN_HID0 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 8d74a24c550..e8e915ce3d8 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) * pointer on ppc64 and booke as we are running at 0 in real mode * on ppc64 and reloc_offset is always 0 on booke. 
*/ - if (s->cpu_setup) { - s->cpu_setup(offset, s); + if (t->cpu_setup) { + t->cpu_setup(offset, t); } #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ } diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index bf5cb91f07d..fd481232957 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu) dbg("removing cpu %lu from node %d\n", cpu, node); if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { - cpumask_set_cpu(cpu, node_to_cpumask_map[node]); + cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); } else { printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", cpu, node); @@ -1289,10 +1289,9 @@ u64 memory_hotplug_max(void) } #endif /* CONFIG_MEMORY_HOTPLUG */ -/* Vrtual Processor Home Node (VPHN) support */ +/* Virtual Processor Home Node (VPHN) support */ #ifdef CONFIG_PPC_SPLPAR -#define VPHN_NR_CHANGE_CTRS (8) -static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS]; +static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; static cpumask_t cpu_associativity_changes_mask; static int vphn_enabled; static void set_topology_timer(void); @@ -1303,16 +1302,18 @@ static void set_topology_timer(void); */ static void setup_cpu_associativity_change_counters(void) { - int cpu = 0; + int cpu; + + /* The VPHN feature supports a maximum of 8 reference points */ + BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); for_each_possible_cpu(cpu) { - int i = 0; + int i; u8 *counts = vphn_cpu_change_counts[cpu]; volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; - for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) { + for (i = 0; i < distance_ref_points_depth; i++) counts[i] = hypervisor_counts[i]; - } } } @@ -1329,7 +1330,7 @@ static void setup_cpu_associativity_change_counters(void) */ static int update_cpu_associativity_changes_mask(void) { - int cpu = 0, nr_cpus = 0; + int cpu, nr_cpus = 0; cpumask_t *changes = &cpu_associativity_changes_mask; cpumask_clear(changes); @@ -1339,8 +1340,8 @@ static int update_cpu_associativity_changes_mask(void) u8 *counts = vphn_cpu_change_counts[cpu]; volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; - for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) { - if (hypervisor_counts[i] > counts[i]) { + for (i = 0; i < distance_ref_points_depth; i++) { + if (hypervisor_counts[i] != counts[i]) { counts[i] = hypervisor_counts[i]; changed = 1; } @@ -1354,8 +1355,11 @@ static int update_cpu_associativity_changes_mask(void) return nr_cpus; } -/* 6 64-bit registers unpacked into 12 32-bit associativity values */ -#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32)) +/* + * 6 64-bit registers unpacked into 12 32-bit associativity values. To form + * the complete property we have to add the length in the first cell. + */ +#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) /* * Convert the associativity domain numbers returned from the hypervisor @@ -1363,15 +1367,14 @@ static int update_cpu_associativity_changes_mask(void) */ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) { - int i = 0; - int nr_assoc_doms = 0; + int i, nr_assoc_doms = 0; const u16 *field = (const u16*) packed; #define VPHN_FIELD_UNUSED (0xffff) #define VPHN_FIELD_MSB (0x8000) #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) - for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) { + for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { if (*field == VPHN_FIELD_UNUSED) { /* All significant fields processed, and remaining * fields contain the reserved value of all 1's. 
@@ -1379,14 +1382,12 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) */ unpacked[i] = *((u32*)field); field += 2; - } - else if (*field & VPHN_FIELD_MSB) { + } else if (*field & VPHN_FIELD_MSB) { /* Data is in the lower 15 bits of this field */ unpacked[i] = *field & VPHN_FIELD_MASK; field++; nr_assoc_doms++; - } - else { + } else { /* Data is in the lower 15 bits of this field * concatenated with the next 16 bit field */ @@ -1396,6 +1397,9 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) } } + /* The first cell contains the length of the property */ + unpacked[0] = nr_assoc_doms; + return nr_assoc_doms; } @@ -1405,7 +1409,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) */ static long hcall_vphn(unsigned long cpu, unsigned int *associativity) { - long rc = 0; + long rc; long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; u64 flags = 1; int hwcpu = get_hard_smp_processor_id(cpu); @@ -1419,7 +1423,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity) static long vphn_get_associativity(unsigned long cpu, unsigned int *associativity) { - long rc = 0; + long rc; rc = hcall_vphn(cpu, associativity); @@ -1445,9 +1449,9 @@ static long vphn_get_associativity(unsigned long cpu, */ int arch_update_cpu_topology(void) { - int cpu = 0, nid = 0, old_nid = 0; + int cpu, nid, old_nid; unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; - struct sys_device *sysdev = NULL; + struct sys_device *sysdev; for_each_cpu_mask(cpu, cpu_associativity_changes_mask) { vphn_get_associativity(cpu, associativity); @@ -1512,7 +1516,8 @@ int start_topology_update(void) { int rc = 0; - if (firmware_has_feature(FW_FEATURE_VPHN)) { + if (firmware_has_feature(FW_FEATURE_VPHN) && + get_lppaca()->shared_proc) { vphn_enabled = 1; setup_cpu_associativity_change_counters(); init_timer_deferrable(&topology_timer); diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 5d3ea9f60dd..ca5d5898d32 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page); /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ extern long hcall_tracepoint_refcount; +/* + * Since the tracing code might execute hcalls we need to guard against + * recursion. One example of this are spinlocks calling H_YIELD on + * shared processor partitions. 
+ */ +static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); + void hcall_tracepoint_regfunc(void) { hcall_tracepoint_refcount++; @@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void) void __trace_hcall_entry(unsigned long opcode, unsigned long *args) { + unsigned long flags; + unsigned int *depth; + + local_irq_save(flags); + + depth = &__get_cpu_var(hcall_trace_depth); + + if (*depth) + goto out; + + (*depth)++; trace_hcall_entry(opcode, args); + (*depth)--; + +out: + local_irq_restore(flags); } void __trace_hcall_exit(long opcode, unsigned long retval, unsigned long *retbuf) { + unsigned long flags; + unsigned int *depth; + + local_irq_save(flags); + + depth = &__get_cpu_var(hcall_trace_depth); + + if (*depth) + goto out; + + (*depth)++; trace_hcall_exit(opcode, retval, retbuf); + (*depth)--; + +out: + local_irq_restore(flags); } #endif diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 4d9ebbab223..68d1537b8c8 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -12,10 +12,8 @@ #include <linux/cpumask.h> #include <asm/segment.h> #include <asm/desc.h> - -#ifdef CONFIG_X86_32 #include <asm/pgtable.h> -#endif +#include <asm/cacheflush.h> #include "realmode/wakeup.h" #include "sleep.h" @@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void) memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); } +int __init acpi_configure_wakeup_memory(void) +{ + if (acpi_realmode) + set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT); + + return 0; +} +arch_initcall(acpi_configure_wakeup_memory); + static int __init acpi_sleep_setup(char *str) { diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 15b5ca2a260..9c949348510 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) char *value = NULL; struct posix_acl *acl; + if (!IS_POSIXACL(inode)) + return NULL; + acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; @@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name, struct posix_acl *acl; int ret = 0; + if (!IS_POSIXACL(dentry->d_inode)) + return -EOPNOTSUPP; + acl = btrfs_get_acl(dentry->d_inode, type); if (IS_ERR(acl)) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index f745287fbf2..4d2110eafe2 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, u64 em_len; u64 em_start; struct extent_map *em; - int ret; + int ret = -ENOMEM; u32 *sums; tree = &BTRFS_I(inode)->io_tree; @@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, compressed_len = em->block_len; cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); + if (!cb) + goto out; + atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; @@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; - cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages, + cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); + if (!cb->compressed_pages) + goto fail1; + bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; for (page_index = 0; page_index < nr_pages; page_index++) { cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (!cb->compressed_pages[page_index]) + goto fail2; } cb->nr_pages = nr_pages; 
@@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->len = uncompressed_len; comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); + if (!comp_bio) + goto fail2; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; atomic_inc(&cb->pending_bios); @@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bio_put(comp_bio); return 0; + +fail2: + for (page_index = 0; page_index < nr_pages; page_index++) + free_page((unsigned long)cb->compressed_pages[page_index]); + + kfree(cb->compressed_pages); +fail1: + kfree(cb); +out: + free_extent_map(em); + return ret; } static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; @@ -900,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, return ret; } -void __exit btrfs_exit_compress(void) +void btrfs_exit_compress(void) { free_workspaces(); } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b531c36455d..fdce8799b98 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1550,6 +1550,7 @@ static int transaction_kthread(void *arg) spin_unlock(&root->fs_info->new_trans_lock); trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); if (transid == trans->transid) { ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); @@ -2453,10 +2454,14 @@ int btrfs_commit_super(struct btrfs_root *root) up_write(&root->fs_info->cleanup_work_sem); trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_commit_transaction(trans, root); ret = btrfs_write_and_wait_transaction(NULL, root); BUG_ON(ret); @@ -2554,6 +2559,8 @@ int close_ctree(struct btrfs_root *root) kfree(fs_info->chunk_root); kfree(fs_info->dev_root); kfree(fs_info->csum_root); + kfree(fs_info); + return 0; } diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 9786963b07e..ff27d7a477b 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -171,6 +171,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child) int ret; path = btrfs_alloc_path(); + if (!path) + return ERR_PTR(-ENOMEM); if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b55269340ce..4e7e012ad66 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -320,11 +320,6 @@ static int caching_kthread(void *data) if (!path) return -ENOMEM; - exclude_super_stripes(extent_root, block_group); - spin_lock(&block_group->space_info->lock); - block_group->space_info->bytes_readonly += block_group->bytes_super; - spin_unlock(&block_group->space_info->lock); - last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); /* @@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, cache->cached = BTRFS_CACHE_NO; } spin_unlock(&cache->lock); - if (ret == 1) + if (ret == 1) { + free_excluded_extents(fs_info->extent_root, cache); return 0; + } } if (load_cache_only) @@ -3344,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, u64 reserved; u64 max_reclaim; u64 reclaimed = 0; + long time_left; int pause = 1; int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; + int loops = 0; block_rsv = &root->fs_info->delalloc_block_rsv; space_info = block_rsv->space_info; @@ 
-3358,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, max_reclaim = min(reserved, to_reclaim); - while (1) { + while (loops < 1024) { /* have the flusher threads jump in and do some IO */ smp_mb(); nr_pages = min_t(unsigned long, nr_pages, @@ -3366,8 +3365,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); spin_lock(&space_info->lock); - if (reserved > space_info->bytes_reserved) + if (reserved > space_info->bytes_reserved) { + loops = 0; reclaimed += reserved - space_info->bytes_reserved; + } else { + loops++; + } reserved = space_info->bytes_reserved; spin_unlock(&space_info->lock); @@ -3378,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, return -EAGAIN; __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(pause); + time_left = schedule_timeout(pause); + + /* We were interrupted, exit */ + if (time_left) + break; + pause <<= 1; if (pause > HZ / 10) pause = HZ / 10; @@ -3588,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, if (num_bytes > 0) { if (dest) { - block_rsv_add_bytes(dest, num_bytes, 0); - } else { + spin_lock(&dest->lock); + if (!dest->full) { + u64 bytes_to_add; + + bytes_to_add = dest->size - dest->reserved; + bytes_to_add = min(num_bytes, bytes_to_add); + dest->reserved += bytes_to_add; + if (dest->reserved >= dest->size) + dest->full = 1; + num_bytes -= bytes_to_add; + } + spin_unlock(&dest->lock); + } + if (num_bytes) { spin_lock(&space_info->lock); space_info->bytes_reserved -= num_bytes; spin_unlock(&space_info->lock); @@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) num_bytes = ALIGN(num_bytes, root->sectorsize); atomic_dec(&BTRFS_I(inode)->outstanding_extents); + WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); spin_lock(&BTRFS_I(inode)->accounting_lock); nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); @@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize) { struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; int ret; block_rsv = get_block_rsv(trans, root); @@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans, if (block_rsv->size == 0) { ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, 0); - if (ret) + /* + * If we couldn't reserve metadata bytes try and use some from + * the global reserve. 
+ */ + if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + return ERR_PTR(ret); + } else if (ret) { return ERR_PTR(ret); + } return block_rsv; } ret = block_rsv_use_bytes(block_rsv, blocksize); if (!ret) return block_rsv; + if (ret) { + WARN_ON(1); + ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, + 0); + if (!ret) { + spin_lock(&block_rsv->lock); + block_rsv->size += blocksize; + spin_unlock(&block_rsv->lock); + return block_rsv; + } else if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + } + } return ERR_PTR(-ENOSPC); } @@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, BUG_ON(!wc); trans = btrfs_start_transaction(tree_root, 0); + BUG_ON(IS_ERR(trans)); + if (block_rsv) trans->block_rsv = block_rsv; @@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, btrfs_end_transaction_throttle(trans, tree_root); trans = btrfs_start_transaction(tree_root, 0); + BUG_ON(IS_ERR(trans)); if (block_rsv) trans->block_rsv = block_rsv; } @@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start, int ret = 0; ra = kzalloc(sizeof(*ra), GFP_NOFS); + if (!ra) + return -ENOMEM; mutex_lock(&inode->i_mutex); first_index = start >> PAGE_CACHE_SHIFT; @@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) BUG_ON(reloc_root->commit_root != NULL); while (1) { trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); mutex_lock(&root->fs_info->drop_mutex); ret = btrfs_drop_snapshot(trans, reloc_root); @@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root) if (found) { trans = btrfs_start_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); } @@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, trans = btrfs_start_transaction(extent_root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); if (extent_key->objectid == 0) { ret = del_extent_zero(trans, extent_root, path, extent_key); @@ -8270,6 +8322,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) if (block_group->cached == BTRFS_CACHE_STARTED) wait_block_group_cache_done(block_group); + /* + * We haven't cached this block group, which means we could + * possibly have excluded extents on this block group. + */ + if (block_group->cached == BTRFS_CACHE_NO) + free_excluded_extents(info->extent_root, block_group); + btrfs_remove_free_space_cache(block_group); btrfs_put_block_group(block_group); @@ -8385,6 +8444,13 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache->sectorsize = root->sectorsize; /* + * We need to exclude the super stripes now so that the space + * info has super bytes accounted for, otherwise we'll think + * we have more space than we actually do. + */ + exclude_super_stripes(root, cache); + + /* * check for two cases, either we are full, and therefore * don't need to bother with the caching work since we won't * find any space, or we are empty, and we can just add all @@ -8392,12 +8458,10 @@ int btrfs_read_block_groups(struct btrfs_root *root) * time, particularly in the full case. 
*/ if (found_key.offset == btrfs_block_group_used(&cache->item)) { - exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; free_excluded_extents(root, cache); } else if (btrfs_block_group_used(&cache->item) == 0) { - exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; add_new_free_space(cache, root->fs_info, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2e993cf1766..5e76a474cb7 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1865,7 +1865,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num, bio_get(bio); if (tree->ops && tree->ops->submit_bio_hook) - tree->ops->submit_bio_hook(page->mapping->host, rw, bio, + ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, mirror_num, bio_flags, start); else submit_bio(rw, bio); @@ -1920,6 +1920,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, nr = bio_get_nr_vecs(bdev); bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); + if (!bio) + return -ENOMEM; bio_add_page(bio, page, page_size, offset); bio->bi_end_io = end_io_func; @@ -2126,7 +2128,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, &bio_flags); if (bio) - submit_one_bio(READ, bio, 0, bio_flags); + ret = submit_one_bio(READ, bio, 0, bio_flags); return ret; } diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a562a250ae7..4f19a3e1bf3 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, root = root->fs_info->csum_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; while (1) { key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; @@ -548,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, if (path->slots[0] == 0) goto out; path->slots[0]--; + } else if (ret < 0) { + goto out; } + leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c800d58f301..c1d3a818731 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -793,8 +793,12 @@ again: for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); if (!pages[i]) { - err = -ENOMEM; - BUG_ON(1); + int c; + for (c = i - 1; c >= 0; c--) { + unlock_page(pages[c]); + page_cache_release(pages[c]); + } + return -ENOMEM; } wait_on_page_writeback(pages[i]); } @@ -946,6 +950,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / (sizeof(struct page *))); pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); + if (!pages) { + ret = -ENOMEM; + goto out; + } /* generic_write_checks can change our pos */ start_pos = pos; @@ -984,8 +992,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, size_t write_bytes = min(iov_iter_count(&i), nrptrs * (size_t)PAGE_CACHE_SIZE - offset); - size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + size_t num_pages = (write_bytes + offset + + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; WARN_ON(num_pages > nrptrs); memset(pages, 0, sizeof(struct page *) * nrptrs); @@ -1015,8 +1023,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, copied = btrfs_copy_from_user(pos, num_pages, write_bytes, pages, &i); - dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> + 
PAGE_CACHE_SHIFT; if (num_pages > dirty_pages) { if (copied > 0) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 60d68426695..a0390657451 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, return entry; } -static void unlink_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) +static inline void +__unlink_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) { rb_erase(&info->offset_index, &block_group->free_space_offset); block_group->free_extents--; +} + +static void unlink_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) +{ + __unlink_free_space(block_group, info); block_group->free_space -= info->bytes; } @@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; + u64 size = block_group->key.offset; /* * The goal is to keep the total amount of memory used per 1gb of space * at or below 32k, so we need to adjust how much memory we allow to be * used by extent based free space tracking */ - max_bytes = MAX_CACHE_BYTES_PER_GIG * - (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); + if (size < 1024 * 1024 * 1024) + max_bytes = MAX_CACHE_BYTES_PER_GIG; + else + max_bytes = MAX_CACHE_BYTES_PER_GIG * + div64_u64(size, 1024 * 1024 * 1024); /* * we want to account for 1 more bitmap than what we have so we can make @@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group, recalculate_thresholds(block_group); } +static void free_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *bitmap_info) +{ + unlink_free_space(block_group, bitmap_info); + kfree(bitmap_info->bitmap); + kfree(bitmap_info); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); +} + static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) @@ -1195,6 +1216,7 @@ again: */ search_start = *offset; search_bytes = *bytes; + search_bytes = min(search_bytes, end - search_start + 1); ret = search_bitmap(block_group, bitmap_info, &search_start, &search_bytes); BUG_ON(ret < 0 || search_start != *offset); @@ -1211,13 +1233,8 @@ again: if (*bytes) { struct rb_node *next = rb_next(&bitmap_info->offset_index); - if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + if (!bitmap_info->bytes) + free_bitmap(block_group, bitmap_info); /* * no entry after this bitmap, but we still have bytes to @@ -1250,13 +1267,8 @@ again: return -EAGAIN; goto again; - } else if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + } else if (!bitmap_info->bytes) + free_bitmap(block_group, bitmap_info); return 0; } @@ -1359,22 +1371,14 @@ out: return ret; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +bool try_merge_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info, bool update_stat) { - struct btrfs_free_space *right_info = NULL; - struct btrfs_free_space *left_info = 
NULL; - struct btrfs_free_space *info = NULL; - int ret = 0; - - info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); - if (!info) - return -ENOMEM; - - info->offset = offset; - info->bytes = bytes; - - spin_lock(&block_group->tree_lock); + struct btrfs_free_space *left_info; + struct btrfs_free_space *right_info; + bool merged = false; + u64 offset = info->offset; + u64 bytes = info->bytes; /* * first we want to see if there is free space adjacent to the range we @@ -1388,37 +1392,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, else left_info = tree_search_offset(block_group, offset - 1, 0, 0); - /* - * If there was no extent directly to the left or right of this new - * extent then we know we're going to have to allocate a new extent, so - * before we do that see if we need to drop this into a bitmap - */ - if ((!left_info || left_info->bitmap) && - (!right_info || right_info->bitmap)) { - ret = insert_into_bitmap(block_group, info); - - if (ret < 0) { - goto out; - } else if (ret) { - ret = 0; - goto out; - } - } - if (right_info && !right_info->bitmap) { - unlink_free_space(block_group, right_info); + if (update_stat) + unlink_free_space(block_group, right_info); + else + __unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; kfree(right_info); + merged = true; } if (left_info && !left_info->bitmap && left_info->offset + left_info->bytes == offset) { - unlink_free_space(block_group, left_info); + if (update_stat) + unlink_free_space(block_group, left_info); + else + __unlink_free_space(block_group, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; kfree(left_info); + merged = true; } + return merged; +} + +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + struct btrfs_free_space *info; + int ret = 0; + + info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); + if (!info) + return -ENOMEM; + + info->offset = offset; + info->bytes = bytes; + + spin_lock(&block_group->tree_lock); + + if (try_merge_free_space(block_group, info, true)) + goto link; + + /* + * There was no extent directly to the left or right of this new + * extent then we know we're going to have to allocate a new extent, so + * before we do that see if we need to drop this into a bitmap + */ + ret = insert_into_bitmap(block_group, info); + if (ret < 0) { + goto out; + } else if (ret) { + ret = 0; + goto out; + } +link: ret = link_free_space(block_group, info); if (ret) kfree(info); @@ -1621,6 +1650,7 @@ __btrfs_return_cluster_to_free_space( node = rb_next(&entry->offset_index); rb_erase(&entry->offset_index, &cluster->root); BUG_ON(entry->bitmap); + try_merge_free_space(block_group, entry, false); tree_insert_offset(&block_group->free_space_offset, entry->offset, &entry->offset_index, 0); } @@ -1685,13 +1715,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, ret = offset; if (entry->bitmap) { bitmap_clear_bits(block_group, entry, offset, bytes); - if (!entry->bytes) { - unlink_free_space(block_group, entry); - kfree(entry->bitmap); - kfree(entry); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + if (!entry->bytes) + free_bitmap(block_group, entry); } else { unlink_free_space(block_group, entry); entry->offset += bytes; @@ -1789,6 +1814,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, ret = search_start; bitmap_clear_bits(block_group, entry, ret, bytes); + if (entry->bytes == 0) + 
free_bitmap(block_group, entry); out: spin_unlock(&cluster->lock); spin_unlock(&block_group->tree_lock); @@ -1842,15 +1869,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, entry->offset += bytes; entry->bytes -= bytes; - if (entry->bytes == 0) { + if (entry->bytes == 0) rb_erase(&entry->offset_index, &cluster->root); - kfree(entry); - } break; } out: spin_unlock(&cluster->lock); + if (!ret) + return 0; + + spin_lock(&block_group->tree_lock); + + block_group->free_space -= bytes; + if (entry->bytes == 0) { + block_group->free_extents--; + kfree(entry); + } + + spin_unlock(&block_group->tree_lock); + return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 160b55b3e13..bcc461a9695 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -416,7 +416,7 @@ again: } if (start == 0) { trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -612,6 +612,7 @@ retry: GFP_NOFS); trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); ret = btrfs_reserve_extent(trans, root, async_extent->compressed_size, async_extent->compressed_size, @@ -771,7 +772,7 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(root == root->fs_info->tree_root); trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1049,7 +1050,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, } else { trans = btrfs_join_transaction(root, 1); } - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); cow_start = (u64)-1; cur_offset = start; @@ -1557,6 +1558,7 @@ out: out_page: unlock_page(page); page_cache_release(page); + kfree(fixup); } /* @@ -1703,7 +1705,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); @@ -1720,6 +1722,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -2354,6 +2357,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) */ if (is_bad_inode(inode)) { trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); btrfs_orphan_del(trans, inode); btrfs_end_transaction(trans, root); iput(inode); @@ -2381,6 +2385,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) if (root->orphan_block_rsv || root->orphan_item_inserted) { trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_end_transaction(trans, root); } @@ -2641,7 +2646,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto err; + goto out; } path->leave_spinning = 1; @@ -2714,9 +2719,10 @@ static int check_path_shared(struct btrfs_root *root, struct extent_buffer *eb; int level; u64 refs = 1; - int uninitialized_var(ret); for (level = 0; level < BTRFS_MAX_LEVEL; level++) { + int ret; + if (!path->nodes[level]) break; eb = path->nodes[level]; @@ -2727,7 +2733,7 @@ static int check_path_shared(struct 
btrfs_root *root, if (refs > 1) return 1; } - return ret; /* XXX callers? */ + return 0; } /* @@ -4134,7 +4140,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) } srcu_read_unlock(&root->fs_info->subvol_srcu, index); - if (root != sub_root) { + if (!IS_ERR(inode) && root != sub_root) { down_read(&root->fs_info->cleanup_work_sem); if (!(inode->i_sb->s_flags & MS_RDONLY)) btrfs_orphan_cleanup(sub_root); @@ -4347,6 +4353,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_set_trans_block_group(trans, inode); if (nolock) ret = btrfs_end_transaction_nolock(trans, root); @@ -4372,6 +4380,7 @@ void btrfs_dirty_inode(struct inode *inode) return; trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); @@ -5176,6 +5185,8 @@ again: em = NULL; btrfs_release_path(root, path); trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return ERR_CAST(trans); goto again; } map = kmap(page); @@ -5280,8 +5291,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + len - 1, 0); trans = btrfs_join_transaction(root, 0); - if (!trans) - return ERR_PTR(-ENOMEM); + if (IS_ERR(trans)) + return ERR_CAST(trans); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -5505,7 +5516,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * while we look for nocow cross refs */ trans = btrfs_join_transaction(root, 0); - if (!trans) + if (IS_ERR(trans)) goto must_cow; if (can_nocow_odirect(trans, inode, start, len) == 1) { @@ -5640,7 +5651,7 @@ again: BUG_ON(!ordered); trans = btrfs_join_transaction(root, 1); - if (!trans) { + if (IS_ERR(trans)) { err = -ENOMEM; goto out; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a506a22b522..02d224e8c83 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); @@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, if (new_size > old_size) { trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_unlock; + } ret = btrfs_grow_device(trans, device, new_size); btrfs_commit_transaction(trans, root); } else { @@ -1898,7 +1902,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, memcpy(&new_key, &key, sizeof(new_key)); new_key.objectid = inode->i_ino; - new_key.offset = key.offset + destoff - off; + if (off <= key.offset) + new_key.offset = key.offset + destoff - off; + else + new_key.offset = destoff; trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { @@ -2082,7 +2089,7 @@ static long btrfs_ioctl_trans_start(struct file *file) ret = -ENOMEM; trans = btrfs_start_ioctl_transaction(root, 0); - if (!trans) + if (IS_ERR(trans)) goto out_drop; file->private_data = trans; @@ -2138,9 +2145,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) path->leave_spinning = 1; trans = btrfs_start_transaction(root, 1); - if (!trans) { + if (IS_ERR(trans)) { btrfs_free_path(path); - return -ENOMEM; + return PTR_ERR(trans); } dir_id = 
btrfs_super_root_dir(&root->fs_info->super_copy); @@ -2334,6 +2341,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp u64 transid; trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); transid = trans->transid; btrfs_commit_transaction_async(trans, root, 0); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 2b61e1ddcd9..083a5547737 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, u64 file_offset) { struct rb_root *root = &tree->tree; - struct rb_node *prev; + struct rb_node *prev = NULL; struct rb_node *ret; struct btrfs_ordered_extent *entry; diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 0d126be22b6..fb2605d998e 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) #else BUG(); #endif + break; case BTRFS_BLOCK_GROUP_ITEM_KEY: bi = btrfs_item_ptr(l, i, struct btrfs_block_group_item); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 045c9c2b2d7..1f5556acb53 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2028,6 +2028,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, while (1) { trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); trans->block_rsv = rc->block_rsv; ret = btrfs_block_rsv_check(trans, root, rc->block_rsv, @@ -2147,6 +2148,12 @@ again: } trans = btrfs_join_transaction(rc->extent_root, 1); + if (IS_ERR(trans)) { + if (!err) + btrfs_block_rsv_release(rc->extent_root, + rc->block_rsv, num_bytes); + return PTR_ERR(trans); + } if (!err) { if (num_bytes != rc->merging_rsv_size) { @@ -3222,6 +3229,7 @@ truncate: trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); + ret = PTR_ERR(trans); goto out; } @@ -3628,6 +3636,7 @@ int prepare_to_relocate(struct reloc_control *rc) set_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); + BUG_ON(IS_ERR(trans)); btrfs_commit_transaction(trans, rc->extent_root); return 0; } @@ -3657,6 +3666,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) while (1) { trans = btrfs_start_transaction(rc->extent_root, 0); + BUG_ON(IS_ERR(trans)); if (update_backref_cache(trans, &rc->backref_cache)) { btrfs_end_transaction(trans, rc->extent_root); @@ -3804,7 +3814,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) /* get rid of pinned extents */ trans = btrfs_join_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); + if (IS_ERR(trans)) + err = PTR_ERR(trans); + else + btrfs_commit_transaction(trans, rc->extent_root); out_free: btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); btrfs_free_path(path); @@ -4022,6 +4035,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) int ret; trans = btrfs_start_transaction(root->fs_info->tree_root, 0); + BUG_ON(IS_ERR(trans)); memset(&root->root_item.drop_progress, 0, sizeof(root->root_item.drop_progress)); @@ -4125,6 +4139,11 @@ int btrfs_recover_relocation(struct btrfs_root *root) set_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); + if (IS_ERR(trans)) { + unset_reloc_control(rc); + err = PTR_ERR(trans); + goto out_free; + } rc->merge_reloc_tree = 1; @@ -4154,9 +4173,13 @@ int btrfs_recover_relocation(struct btrfs_root *root) 
unset_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); -out: + if (IS_ERR(trans)) + err = PTR_ERR(trans); + else + btrfs_commit_transaction(trans, rc->extent_root); +out_free: kfree(rc); +out: while (!list_empty(&reloc_roots)) { reloc_root = list_entry(reloc_roots.next, struct btrfs_root, root_list); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index b2130c46fdb..a004008f7d2 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -383,7 +383,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, struct btrfs_fs_devices **fs_devices) { substring_t args[MAX_OPT_ARGS]; - char *opts, *p; + char *opts, *orig, *p; int error = 0; int intarg; @@ -397,6 +397,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, opts = kstrdup(options, GFP_KERNEL); if (!opts) return -ENOMEM; + orig = opts; while ((p = strsep(&opts, ",")) != NULL) { int token; @@ -432,7 +433,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, } out_free_opts: - kfree(opts); + kfree(orig); out: /* * If no subvolume name is specified we use the default one. Allocate @@ -623,6 +624,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait) btrfs_wait_ordered_extents(root, 0, 0); trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); return ret; } @@ -761,6 +764,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } btrfs_close_devices(fs_devices); + kfree(fs_info); + kfree(tree_root); } else { char b[BDEVNAME_SIZE]; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index bae5c7b8bbe..3d73c8d93bb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, INIT_DELAYED_WORK(&ac->work, do_async_commit); ac->root = root; ac->newtrans = btrfs_join_transaction(root, 0); + if (IS_ERR(ac->newtrans)) { + int err = PTR_ERR(ac->newtrans); + kfree(ac); + return err; + } /* take transaction reference */ mutex_lock(&root->fs_info->trans_mutex); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 054744ac571..a4bbb854dfd 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, } dst_copy = kmalloc(item_size, GFP_NOFS); src_copy = kmalloc(item_size, GFP_NOFS); + if (!dst_copy || !src_copy) { + btrfs_release_path(root, path); + kfree(dst_copy); + kfree(src_copy); + return -ENOMEM; + } read_extent_buffer(eb, src_copy, src_ptr, item_size); @@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, btrfs_dir_item_key_to_cpu(leaf, di, &location); name_len = btrfs_dir_name_len(leaf, di); name = kmalloc(name_len, GFP_NOFS); + if (!name) + return -ENOMEM; + read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); btrfs_release_path(root, path); @@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log, int match = 0; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + ret = btrfs_search_slot(NULL, log, key, path, 0, 0); if (ret != 0) goto out; @@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, key.offset = (u64)-1; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -1178,6 +1192,9 @@ static noinline int 
replay_one_name(struct btrfs_trans_handle *trans, name_len = btrfs_dir_name_len(eb, di); name = kmalloc(name_len, GFP_NOFS); + if (!name) + return -ENOMEM; + log_type = btrfs_dir_type(eb, di); read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len); @@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, root_owner = btrfs_header_owner(parent); next = btrfs_find_create_tree_block(root, bytenr, blocksize); + if (!next) + return -ENOMEM; if (*level == 1) { wc->process_func(root, next, wc, ptr_gen); @@ -2032,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, wait_log_commit(trans, log_root_tree, log_root_tree->log_transid); mutex_unlock(&log_root_tree->log_mutex); + ret = 0; goto out; } atomic_set(&log_root_tree->log_commit[index2], 1); @@ -2096,7 +2116,7 @@ out: smp_mb(); if (waitqueue_active(&root->log_commit_wait[index1])) wake_up(&root->log_commit_wait[index1]); - return 0; + return ret; } static void free_log_tree(struct btrfs_trans_handle *trans, @@ -2194,6 +2214,9 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, log = root->log_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, name, name_len, -1); if (IS_ERR(di)) { @@ -2594,6 +2617,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, ins_data = kmalloc(nr * sizeof(struct btrfs_key) + nr * sizeof(u32), GFP_NOFS); + if (!ins_data) + return -ENOMEM; + ins_sizes = (u32 *)ins_data; ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); @@ -2725,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, log = root->log_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; dst_path = btrfs_alloc_path(); + if (!dst_path) { + btrfs_free_path(path); + return -ENOMEM; + } min_key.objectid = inode->i_ino; min_key.type = BTRFS_INODE_ITEM_KEY; @@ -3080,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) BUG_ON(!path); trans = btrfs_start_transaction(fs_info->tree_root, 0); + BUG_ON(IS_ERR(trans)); wc.trans = trans; wc.pin = 1; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d158530233b..2636a051e4b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1213,6 +1213,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root, return -ENOMEM; trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + btrfs_free_path(path); + return PTR_ERR(trans); + } key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; @@ -1606,6 +1610,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) } trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + kfree(device); + ret = PTR_ERR(trans); + goto error; + } + lock_chunks(root); device->writeable = 1; @@ -1873,7 +1883,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, return ret; trans = btrfs_start_transaction(root, 0); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); lock_chunks(root); @@ -2047,7 +2057,7 @@ int btrfs_balance(struct btrfs_root *dev_root) BUG_ON(ret); trans = btrfs_start_transaction(dev_root, 0); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_grow_device(trans, device, old_size); BUG_ON(ret); @@ -2213,6 +2223,11 @@ again: /* Shrinking succeeded, else we would be at "done". 
*/ trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto done; + } + lock_chunks(root); device->disk_total_bytes = new_size; diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 1e7636b145a..beeebf19423 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -372,6 +372,10 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), GFP_KERNEL); + if (!ppace) { + cERROR(1, "DACL memory allocation error"); + return; + } for (i = 0; i < num_aces; ++i) { ppace[i] = (struct cifs_ace *) (acl_base + acl_size); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 46c66ed01af..904aa47e351 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -136,9 +136,6 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) } } - if (ses->status == CifsExiting) - return -EIO; - /* * Give demultiplex thread up to 10 seconds to reconnect, should be * greater than cifs socket timeout which is 7 seconds @@ -156,7 +153,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) * retrying until process is killed or server comes * back on-line */ - if (!tcon->retry || ses->status == CifsExiting) { + if (!tcon->retry) { cFYI(1, "gave up waiting on reconnect in smb_init"); return -EHOSTDOWN; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 47d8ff62368..257b6d895e2 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -337,8 +337,12 @@ cifs_echo_request(struct work_struct *work) struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, echo.work); - /* no need to ping if we got a response recently */ - if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) + /* + * We cannot send an echo until the NEGOTIATE_PROTOCOL request is done. + * Also, no need to ping if we got a response recently + */ + if (server->tcpStatus != CifsGood || + time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) goto requeue_echo; rc = CIFSSMBEcho(server); @@ -578,12 +582,12 @@ incomplete_rcv: else if (reconnect == 1) continue; - length += 4; /* account for rfc1002 hdr */ + total_read += 4; /* account for rfc1002 hdr */ - - dump_smb(smb_buffer, length); - if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) { - cifs_dump_mem("Bad SMB: ", smb_buffer, 48); + dump_smb(smb_buffer, total_read); + if (checkSMB(smb_buffer, smb_buffer->Mid, total_read)) { + cifs_dump_mem("Bad SMB: ", smb_buffer, + total_read < 48 ? 
total_read : 48); continue; } @@ -633,11 +637,11 @@ incomplete_rcv: mid_entry->largeBuf = isLargeBuf; multi_t2_fnd: mid_entry->midState = MID_RESPONSE_RECEIVED; - list_del_init(&mid_entry->qhead); - mid_entry->callback(mid_entry); #ifdef CONFIG_CIFS_STATS2 mid_entry->when_received = jiffies; #endif + list_del_init(&mid_entry->qhead); + mid_entry->callback(mid_entry); break; } mid_entry = NULL; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 74c0a282d01..e964b1cd5dd 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1662,10 +1662,10 @@ static ssize_t cifs_iovec_write(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) { - size_t total_written = 0; - unsigned int written = 0; - unsigned long num_pages, npages; - size_t copied, len, cur_len, i; + unsigned int written; + unsigned long num_pages, npages, i; + size_t copied, len, cur_len; + ssize_t total_written = 0; struct kvec *to_send; struct page **pages; struct iov_iter it; @@ -1821,7 +1821,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, { int rc; int xid; - unsigned int total_read, bytes_read = 0; + ssize_t total_read; + unsigned int bytes_read = 0; size_t len, cur_len; int iov_offset = 0; struct cifs_sb_info *cifs_sb; diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b8c5e2eb43d..fbc5aace54b 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -359,6 +359,10 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, if (rc) return rc; + /* enable signing if server requires it */ + if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; + mutex_lock(&server->srv_mutex); mid = AllocMidQEntry(in_buf, server); if (mid == NULL) { diff --git a/kernel/cred.c b/kernel/cred.c index 6a1aa004e37..3a9d6dd53a6 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -252,13 +252,13 @@ struct cred *cred_alloc_blank(void) #endif atomic_set(&new->usage, 1); +#ifdef CONFIG_DEBUG_CREDENTIALS + new->magic = CRED_MAGIC; +#endif if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) goto error; -#ifdef CONFIG_DEBUG_CREDENTIALS - new->magic = CRED_MAGIC; -#endif return new; error: @@ -657,6 +657,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) validate_creds(old); *new = *old; + atomic_set(&new->usage, 1); + set_cred_subscribers(new, 0); get_uid(new->user); get_group_info(new->group_info); @@ -674,8 +676,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) if (security_prepare_creds(new, old, GFP_KERNEL) < 0) goto error; - atomic_set(&new->usage, 1); - set_cred_subscribers(new, 0); put_cred(old); validate_creds(new); return new; @@ -748,7 +748,11 @@ bool creds_are_invalid(const struct cred *cred) if (cred->magic != CRED_MAGIC) return true; #ifdef CONFIG_SECURITY_SELINUX - if (selinux_is_enabled()) { + /* + * cred->security == NULL if security_cred_alloc_blank() or + * security_prepare_creds() returned an error. 
+ */ + if (selinux_is_enabled() && cred->security) { if ((unsigned long) cred->security < PAGE_SIZE) return true; if ((*(u32 *)cred->security & 0xffffff00) == diff --git a/kernel/watchdog.c b/kernel/watchdog.c index f37f974aa81..18bb15776c5 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -363,8 +363,14 @@ static int watchdog_nmi_enable(int cpu) goto out_save; } - printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", - cpu, PTR_ERR(event)); + + /* vary the KERN level based on the returned errno */ + if (PTR_ERR(event) == -EOPNOTSUPP) + printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu); + else if (PTR_ERR(event) == -ENOENT) + printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu); + else + printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event)); return PTR_ERR(event); /* success path */ diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index e276eb46853..c8d69927068 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3198,7 +3198,11 @@ static void selinux_cred_free(struct cred *cred) { struct task_security_struct *tsec = cred->security; - BUG_ON((unsigned long) cred->security < PAGE_SIZE); + /* + * cred->security == NULL if security_cred_alloc_blank() or + * security_prepare_creds() returned an error. + */ + BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE); cred->security = (void *) 0x7UL; kfree(tsec); } diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index 24d3013c023..7c1fc64cb53 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c @@ -50,7 +50,11 @@ static void aaci_ac97_select_codec(struct aaci *aaci, struct snd_ac97 *ac97) if (v & SLFR_1RXV) readl(aaci->base + AACI_SL1RX); - writel(maincr, aaci->base + AACI_MAINCR); + if (maincr != readl(aaci->base + AACI_MAINCR)) { + writel(maincr, aaci->base + AACI_MAINCR); + readl(aaci->base + AACI_MAINCR); + udelay(1); + } } /* @@ -993,6 +997,8 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci) * disabling the channel doesn't clear the FIFO. 
*/ writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR); + readl(aaci->base + AACI_MAINCR); + udelay(1); writel(aaci->maincr, aaci->base + AACI_MAINCR); /* diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 07f8d6d852c..12e0e41696d 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -680,9 +680,9 @@ static int __cmd_record(int argc, const char **argv) perf_event__synthesize_guest_os); if (!system_wide) - perf_event__synthesize_thread(target_tid, - process_synthesized_event, - session); + perf_event__synthesize_thread_map(evsel_list->threads, + process_synthesized_event, + session); else perf_event__synthesize_threads(process_synthesized_event, session); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 210c736e6db..c9fd66d4a08 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -876,8 +876,8 @@ static int __cmd_top(void) return -ENOMEM; if (top.target_tid != -1) - perf_event__synthesize_thread(top.target_tid, perf_event__process, - session); + perf_event__synthesize_thread_map(top.evlist->threads, + perf_event__process, session); else perf_event__synthesize_threads(perf_event__process, session); diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 731265f4ad1..fbf5754c886 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -6,6 +6,7 @@ #include "string.h" #include "strlist.h" #include "thread.h" +#include "thread_map.h" static const char *perf_event__names[] = { [0] = "TOTAL", @@ -265,11 +266,12 @@ static int __event__synthesize_thread(union perf_event *comm_event, process, session); } -int perf_event__synthesize_thread(pid_t pid, perf_event__handler_t process, - struct perf_session *session) +int perf_event__synthesize_thread_map(struct thread_map *threads, + perf_event__handler_t process, + struct perf_session *session) { union perf_event *comm_event, *mmap_event; - int err = -1; + int err = -1, thread; comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); if (comm_event == NULL) @@ -279,8 +281,15 @@ int perf_event__synthesize_thread(pid_t pid, perf_event__handler_t process, if (mmap_event == NULL) goto out_free_comm; - err = __event__synthesize_thread(comm_event, mmap_event, pid, - process, session); + err = 0; + for (thread = 0; thread < threads->nr; ++thread) { + if (__event__synthesize_thread(comm_event, mmap_event, + threads->map[thread], + process, session)) { + err = -1; + break; + } + } free(mmap_event); out_free_comm: free(comm_event); diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index eecb42273d5..9c35170fb37 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -135,6 +135,7 @@ union perf_event { void perf_event__print_totals(void); struct perf_session; +struct thread_map; typedef int (*perf_event__handler_synth_t)(union perf_event *event, struct perf_session *session); @@ -142,8 +143,9 @@ typedef int (*perf_event__handler_t)(union perf_event *event, struct perf_sample *sample, struct perf_session *session); -int perf_event__synthesize_thread(pid_t pid, perf_event__handler_t process, - struct perf_session *session); +int perf_event__synthesize_thread_map(struct thread_map *threads, + perf_event__handler_t process, + struct perf_session *session); int perf_event__synthesize_threads(perf_event__handler_t process, struct perf_session *session); int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, |
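The free-space-cache hunk above factors the left/right neighbour coalescing out of btrfs_add_free_space() into try_merge_free_space(), so that __btrfs_return_cluster_to_free_space() can reuse it (with update_stat == false) when it puts cluster entries back. Below is a minimal userspace sketch of that coalescing idea only; it assumes a flat table instead of btrfs's rb-tree and ignores bitmap entries, statistics and locking, so it is an illustration rather than the kernel code.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct free_extent {
	uint64_t offset;
	uint64_t bytes;
};

static int find_starting_at(const struct free_extent *t, int nr, uint64_t offset)
{
	for (int i = 0; i < nr; i++)
		if (t[i].offset == offset)
			return i;
	return -1;
}

static int find_ending_at(const struct free_extent *t, int nr, uint64_t end)
{
	for (int i = 0; i < nr; i++)
		if (t[i].offset + t[i].bytes == end)
			return i;
	return -1;
}

static void remove_entry(struct free_extent *t, int *nr, int i)
{
	t[i] = t[--(*nr)];
}

/* Fold the neighbours of "info" into it, as try_merge_free_space() does. */
static bool try_merge(struct free_extent *t, int *nr, struct free_extent *info)
{
	bool merged = false;
	int i;

	/* right neighbour: starts exactly where the new extent ends */
	i = find_starting_at(t, *nr, info->offset + info->bytes);
	if (i >= 0) {
		info->bytes += t[i].bytes;
		remove_entry(t, nr, i);
		merged = true;
	}

	/* left neighbour: ends exactly where the new extent starts */
	i = find_ending_at(t, *nr, info->offset);
	if (i >= 0) {
		info->offset = t[i].offset;
		info->bytes += t[i].bytes;
		remove_entry(t, nr, i);
		merged = true;
	}
	return merged;
}

int main(void)
{
	struct free_extent table[8] = { { 0, 4096 }, { 8192, 4096 } };
	int nr = 2;
	struct free_extent info = { 4096, 4096 };

	if (try_merge(table, &nr, &info))
		printf("merged: offset=%llu bytes=%llu (entries left: %d)\n",
		       (unsigned long long)info.offset,
		       (unsigned long long)info.bytes, nr);
	return 0;
}

Merging both neighbours before linking the new entry is what lets the cluster-return path re-insert entries without fragmenting adjacent free space.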
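Most of the btrfs changes above are the same fix repeated: btrfs_join_transaction() and btrfs_start_transaction() report failure with an ERR_PTR()-encoded pointer rather than NULL, so the old BUG_ON(!trans) and if (!trans) checks never fire; callers now test IS_ERR(trans) and propagate PTR_ERR(trans). A self-contained userspace model of that convention follows; the ERR_PTR/IS_ERR/PTR_ERR stand-ins are simplified versions of the macros in include/linux/err.h, not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct trans_handle { int id; };

/* May fail: returns either a valid handle or ERR_PTR(-ENOMEM). */
static struct trans_handle *join_transaction(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);

	struct trans_handle *t = malloc(sizeof(*t));
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->id = 42;
	return t;
}

static int do_work(int fail)
{
	struct trans_handle *trans = join_transaction(fail);

	/* A NULL check here would miss the error: the pointer is non-NULL. */
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	printf("got transaction %d\n", trans->id);
	free(trans);
	return 0;
}

int main(void)
{
	printf("ok path:    %d\n", do_work(0));
	printf("error path: %d\n", do_work(1));
	return 0;
}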
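In btrfs_ioctl_clone(), new_key.offset is now only computed as key.offset + destoff - off when off <= key.offset; otherwise the clone range starts inside the extent and the destination offset is simply destoff. With u64 arithmetic the old unconditional expression silently wraps around. A small sketch of that wraparound, using uint64_t in plain C and made-up example offsets:

#include <stdio.h>
#include <stdint.h>

static uint64_t dest_key_offset(uint64_t key_offset, uint64_t destoff, uint64_t off)
{
	if (off <= key_offset)
		return key_offset + destoff - off;	/* normal case */
	return destoff;					/* clone starts inside this extent */
}

int main(void)
{
	uint64_t key_offset = 0, destoff = 4096, off = 8192;

	printf("naive:   %llu\n",
	       (unsigned long long)(key_offset + destoff - off));	/* wraps */
	printf("clamped: %llu\n",
	       (unsigned long long)dest_key_offset(key_offset, destoff, off));
	return 0;
}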
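The btrfs_parse_early_options() fix keeps a second pointer, orig, to the kstrdup() buffer because strsep() advances (and finally NULLs) the pointer it is handed, so kfree(opts) at the end would no longer free the original allocation. The same pitfall in plain userspace C with strdup()/strsep():

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse(const char *options)
{
	char *opts = strdup(options);
	if (!opts)
		return;

	char *orig = opts;	/* remember the address strdup() returned */
	char *p;

	while ((p = strsep(&opts, ",")) != NULL)
		printf("token: %s\n", p);

	/* "opts" is now NULL; freeing it instead of "orig" would leak. */
	free(orig);
}

int main(void)
{
	parse("device=/dev/sdb,subvol=data");
	return 0;
}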
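Several tree-log.c hunks add previously missing allocation-failure checks: allocate everything the function needs, test once, and free both buffers on failure, relying on kfree(NULL) being a no-op. A generic userspace sketch of that pattern (the copy body is only a stand-in for the real read_extent_buffer() work):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Allocate both buffers, test once, free both on failure. */
static int copy_item(const void *src, size_t item_size, void **out)
{
	void *dst_copy = malloc(item_size);
	void *src_copy = malloc(item_size);

	if (!dst_copy || !src_copy) {
		free(dst_copy);		/* free(NULL) is a no-op */
		free(src_copy);
		return -ENOMEM;
	}

	/* stand-in for the real item copy/compare work */
	memcpy(src_copy, src, item_size);
	memcpy(dst_copy, src_copy, item_size);

	free(src_copy);
	*out = dst_copy;
	return 0;
}

int main(void)
{
	const char item[] = "log tree item payload";
	void *copy = NULL;

	if (copy_item(item, sizeof(item), &copy) == 0) {
		puts(copy);
		free(copy);
	}
	return 0;
}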
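cifs_echo_request() now refuses to send an echo until the server's tcpStatus is CifsGood, i.e. until NEGOTIATE_PROTOCOL has completed, in addition to the existing "answered recently" check. A simplified model of those two guards, using seconds and time_t where the driver uses jiffies (the real SMB_ECHO_INTERVAL is expressed in jiffies):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

enum tcp_status { CifsNew, CifsGood, CifsExiting };

#define SMB_ECHO_INTERVAL	60	/* seconds in this sketch */

/* Echo only once the session is negotiated and has been idle a while. */
static bool should_send_echo(enum tcp_status status, time_t last_response, time_t now)
{
	if (status != CifsGood)
		return false;
	if (now < last_response + SMB_ECHO_INTERVAL - 1)
		return false;
	return true;
}

int main(void)
{
	time_t now = time(NULL);

	printf("mid-negotiation: %d\n", should_send_echo(CifsNew, now, now));
	printf("recent reply:    %d\n", should_send_echo(CifsGood, now - 5, now));
	printf("idle session:    %d\n", should_send_echo(CifsGood, now - 120, now));
	return 0;
}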
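The cred.c hunks move the initialisation of ->usage, the subscriber count and the debug magic in front of the security hooks that can fail, and the SELinux side tolerates cred->security == NULL, so a half-built cred can be released through the normal path instead of tripping a BUG_ON. A userspace model of that "initialise before the fallible hook" ordering, with simplified types rather than the kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct cred { int usage; void *security; };

static int security_hook(struct cred *c, int fail)
{
	if (fail)
		return -ENOMEM;		/* leaves c->security == NULL */
	c->security = malloc(16);
	return c->security ? 0 : -ENOMEM;
}

static void put_cred(struct cred *c)
{
	if (--c->usage == 0) {
		free(c->security);	/* free(NULL) is fine, mirroring the
					   NULL check added to selinux_cred_free() */
		free(c);
	}
}

static struct cred *cred_alloc(int fail)
{
	struct cred *c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;

	c->usage = 1;			/* set BEFORE the fallible hook */
	if (security_hook(c, fail) < 0) {
		put_cred(c);		/* safe: usage is already valid */
		return NULL;
	}
	return c;
}

int main(void)
{
	struct cred *ok = cred_alloc(0);
	struct cred *bad = cred_alloc(1);

	printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
	if (ok)
		put_cred(ok);
	return 0;
}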
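watchdog_nmi_enable() now chooses the printk level from the perf error code: -EOPNOTSUPP is merely informational (no LAPIC / no hardware support), -ENOENT is a warning (hardware events not enabled), and everything else remains an error. A trivial sketch of that errno-to-severity mapping, with printf standing in for printk:

#include <stdio.h>
#include <errno.h>

static void report_nmi_watchdog_failure(int cpu, long err)
{
	if (err == -EOPNOTSUPP)
		printf("INFO:    NMI watchdog disabled (cpu%d): not supported\n", cpu);
	else if (err == -ENOENT)
		printf("WARNING: NMI watchdog disabled (cpu%d): hardware events not enabled\n", cpu);
	else
		printf("ERROR:   NMI watchdog disabled (cpu%d): unable to create perf event: %ld\n",
		       cpu, err);
}

int main(void)
{
	report_nmi_watchdog_failure(0, -EOPNOTSUPP);
	report_nmi_watchdog_failure(1, -ENOENT);
	report_nmi_watchdog_failure(2, -EINVAL);
	return 0;
}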
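Finally, the perf change replaces perf_event__synthesize_thread(), which took a single pid, with perf_event__synthesize_thread_map(), which walks every entry of a struct thread_map and stops at the first failure. The loop structure looks roughly like the sketch below, with simplified stand-in types and a dummy per-thread callback in place of __event__synthesize_thread():

#include <stdio.h>

struct thread_map { int nr; int map[4]; };

static int synthesize_one(int pid)
{
	printf("synthesizing comm/mmap events for pid %d\n", pid);
	return 0;	/* 0 on success, like the per-thread helper */
}

static int synthesize_thread_map(const struct thread_map *threads)
{
	int err = 0;

	for (int t = 0; t < threads->nr; ++t) {
		if (synthesize_one(threads->map[t])) {
			err = -1;
			break;
		}
	}
	return err;
}

int main(void)
{
	struct thread_map threads = { .nr = 3, .map = { 101, 102, 103 } };

	return synthesize_thread_map(&threads);
}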