Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mach-omap1/lcd_dma.c             24
-rw-r--r--  arch/arm/mach-omap1/time.c                 1
-rw-r--r--  arch/arm/mach-omap2/board-devkit8000.c    27
-rw-r--r--  arch/arm/mach-omap2/board-omap4panda.c     2
-rw-r--r--  arch/arm/mach-omap2/board-rm680.c          3
-rw-r--r--  arch/arm/mach-omap2/mux.c                  1
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c               7
-rw-r--r--  arch/arm/mach-omap2/smartreflex.c         11
-rw-r--r--  arch/arm/mach-omap2/voltage.c              1
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h      8
-rw-r--r--  arch/powerpc/include/asm/page.h            2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_6xx.S       40
-rw-r--r--  arch/powerpc/kernel/cputable.c             4
-rw-r--r--  arch/powerpc/mm/numa.c                    55
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c     37
-rw-r--r--  arch/x86/kernel/acpi/sleep.c              13
16 files changed, 143 insertions(+), 93 deletions(-)
diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c
index c9088d85da0..453809359ba 100644
--- a/arch/arm/mach-omap1/lcd_dma.c
+++ b/arch/arm/mach-omap1/lcd_dma.c
@@ -37,7 +37,7 @@ int omap_lcd_dma_running(void)
* On OMAP1510, internal LCD controller will start the transfer
* when it gets enabled, so assume DMA running if LCD enabled.
*/
- if (cpu_is_omap1510())
+ if (cpu_is_omap15xx())
if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN)
return 1;
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
void omap_set_lcd_dma_b1_rotation(int rotate)
{
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap15xx()) {
printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
BUG();
return;
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
void omap_set_lcd_dma_b1_mirror(int mirror)
{
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap15xx()) {
printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
BUG();
}
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
{
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap15xx()) {
printk(KERN_ERR "DMA virtual resulotion is not supported "
"in 1510 mode\n");
BUG();
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
{
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap15xx()) {
printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
BUG();
}
@@ -177,7 +177,7 @@ static void set_b1_regs(void)
bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
/* 1510 DMA requires the bottom address to be 2 more
* than the actual last memory access location. */
- if (cpu_is_omap1510() &&
+ if (cpu_is_omap15xx() &&
lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
bottom += 2;
ei = PIXSTEP(0, 0, 1, 0);
@@ -241,7 +241,7 @@ static void set_b1_regs(void)
return; /* Suppress warning about uninitialized vars */
}
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap15xx()) {
omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
@@ -343,7 +343,7 @@ void omap_free_lcd_dma(void)
BUG();
return;
}
- if (!cpu_is_omap1510())
+ if (!cpu_is_omap15xx())
omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
OMAP1610_DMA_LCD_CCR);
lcd_dma.reserved = 0;
@@ -360,7 +360,7 @@ void omap_enable_lcd_dma(void)
* connected. Otherwise the OMAP internal controller will
* start the transfer when it gets enabled.
*/
- if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+ if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
return;
w = omap_readw(OMAP1610_DMA_LCD_CTRL);
@@ -378,14 +378,14 @@ EXPORT_SYMBOL(omap_enable_lcd_dma);
void omap_setup_lcd_dma(void)
{
BUG_ON(lcd_dma.active);
- if (!cpu_is_omap1510()) {
+ if (!cpu_is_omap15xx()) {
/* Set some reasonable defaults */
omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
}
set_b1_regs();
- if (!cpu_is_omap1510()) {
+ if (!cpu_is_omap15xx()) {
u16 w;
w = omap_readw(OMAP1610_DMA_LCD_CCR);
@@ -407,7 +407,7 @@ void omap_stop_lcd_dma(void)
u16 w;
lcd_dma.active = 0;
- if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+ if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
return;
w = omap_readw(OMAP1610_DMA_LCD_CCR);
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index f83fc335c61..6885d2fac18 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -44,7 +44,6 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
-#include <linux/sched.h>
#include <asm/system.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index e906e05bb41..9a2a31e011c 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -115,9 +115,6 @@ static struct omap2_hsmmc_info mmc[] = {
static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev)
{
- twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1);
- twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0);
-
if (gpio_is_valid(dssdev->reset_gpio))
gpio_set_value_cansleep(dssdev->reset_gpio, 1);
return 0;
@@ -247,6 +244,8 @@ static struct gpio_led gpio_leds[];
static int devkit8000_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
+ int ret;
+
omap_mux_init_gpio(29, OMAP_PIN_INPUT);
/* gpio + 0 is "mmc0_cd" (input/IRQ) */
mmc[0].gpio_cd = gpio + 0;
@@ -255,17 +254,23 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
- /* gpio + 1 is "LCD_PWREN" (out, active high) */
- devkit8000_lcd_device.reset_gpio = gpio + 1;
- gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN");
- /* Disable until needed */
- gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0);
+ /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
+ devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0;
+ ret = gpio_request_one(devkit8000_lcd_device.reset_gpio,
+ GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN");
+ if (ret < 0) {
+ devkit8000_lcd_device.reset_gpio = -EINVAL;
+ printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n");
+ }
/* gpio + 7 is "DVI_PD" (out, active low) */
devkit8000_dvi_device.reset_gpio = gpio + 7;
- gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown");
- /* Disable until needed */
- gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0);
+ ret = gpio_request_one(devkit8000_dvi_device.reset_gpio,
+ GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown");
+ if (ret < 0) {
+ devkit8000_dvi_device.reset_gpio = -EINVAL;
+ printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n");
+ }
return 0;
}
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index e001a048dc0..e944025d5ef 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -409,8 +409,6 @@ static void __init omap4_panda_init(void)
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
omap_serial_init();
omap4_twl6030_hsmmc_init(mmc);
- /* OMAP4 Panda uses internal transceiver so register nop transceiver */
- usb_nop_xceiv_register();
omap4_ehci_init();
usb_musb_init(&musb_board_data);
}
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index cb77be7ac44..39a71bb8a30 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -40,9 +40,6 @@ static struct regulator_consumer_supply rm680_vemmc_consumers[] = {
static struct regulator_init_data rm680_vemmc = {
.constraints = {
.name = "rm680_vemmc",
- .min_uV = 2900000,
- .max_uV = 2900000,
- .apply_uV = 1,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_STATUS
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index fae49d12bc7..98148b6c36e 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -1000,6 +1000,7 @@ int __init omap_mux_init(const char *name, u32 flags,
if (!partition->base) {
pr_err("%s: Could not ioremap mux partition at 0x%08x\n",
__func__, partition->phys);
+ kfree(partition);
return -ENODEV;
}
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index a4aa1920a75..2f864e4b085 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -168,9 +168,10 @@ static void omap3_core_restore_context(void)
* once during boot sequence, but this works as we are not using secure
* services.
*/
-static void omap3_save_secure_ram_context(u32 target_mpu_state)
+static void omap3_save_secure_ram_context(void)
{
u32 ret;
+ int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
/*
@@ -181,7 +182,7 @@ static void omap3_save_secure_ram_context(u32 target_mpu_state)
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
ret = _omap_save_secure_sram((u32 *)
__pa(omap3_secure_ram_storage));
- pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
+ pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
/* Following is for error tracking, it should not happen */
if (ret) {
printk(KERN_ERR "save_secure_sram() returns %08x\n",
@@ -1094,7 +1095,7 @@ static int __init omap3_pm_init(void)
local_fiq_disable();
omap_dma_global_context_save();
- omap3_save_secure_ram_context(PWRDM_POWER_ON);
+ omap3_save_secure_ram_context();
omap_dma_global_context_restore();
local_irq_enable();
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 77ecebf3fae..c37e823266d 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -780,8 +780,7 @@ static int omap_sr_autocomp_show(void *data, u64 *val)
struct omap_sr *sr_info = (struct omap_sr *) data;
if (!sr_info) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, sr_info->voltdm->name);
+ pr_warning("%s: omap_sr struct not found\n", __func__);
return -EINVAL;
}
@@ -795,8 +794,7 @@ static int omap_sr_autocomp_store(void *data, u64 val)
struct omap_sr *sr_info = (struct omap_sr *) data;
if (!sr_info) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, sr_info->voltdm->name);
+ pr_warning("%s: omap_sr struct not found\n", __func__);
return -EINVAL;
}
@@ -834,7 +832,8 @@ static int __init omap_sr_probe(struct platform_device *pdev)
if (!pdata) {
dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_free_devinfo;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -966,7 +965,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev)
}
sr_info = _sr_lookup(pdata->voltdm);
- if (!sr_info) {
+ if (IS_ERR(sr_info)) {
dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
__func__);
return -EINVAL;
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index ed6079c94c5..12be525b8df 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -471,6 +471,7 @@ static void __init vdd_debugfs_init(struct omap_vdd_info *vdd)
strcat(name, vdd->voltdm.name);
vdd->debug_dir = debugfs_create_dir(name, voltage_dir);
+ kfree(name);
if (IS_ERR(vdd->debug_dir)) {
pr_warning("%s: Unable to create debugfs directory for"
" vdd_%s\n", __func__, vdd->voltdm.name);
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 8eaed81ea64..17194fcd404 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,8 +40,8 @@
/* MAS registers bit definitions */
-#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000)
-#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000)
+#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
+#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000)
#define MAS0_NV(x) ((x) & 0x00000FFF)
#define MAS0_HES 0x00004000
#define MAS0_WQ_ALLWAYS 0x00000000
@@ -50,12 +50,12 @@
#define MAS1_VALID 0x80000000
#define MAS1_IPROT 0x40000000
-#define MAS1_TID(x) ((x << 16) & 0x3FFF0000)
+#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000)
#define MAS1_IND 0x00002000
#define MAS1_TS 0x00001000
#define MAS1_TSIZE_MASK 0x00000f80
#define MAS1_TSIZE_SHIFT 7
-#define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
#define MAS2_EPN 0xFFFFF000
#define MAS2_X0 0x00000040
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 53b64be40eb..da4b2000854 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr;
#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55cba4a8a95..f8cd9fba4d3 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -18,7 +18,7 @@
#include <asm/mmu.h>
_GLOBAL(__setup_cpu_603)
- mflr r4
+ mflr r5
BEGIN_MMU_FTR_SECTION
li r10,0
mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */
@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
bl __init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
bl setup_common_caches
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_604)
- mflr r4
+ mflr r5
bl setup_common_caches
bl setup_604_hid0
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_750)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_750cx)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750cx
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_750fx)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750fx
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_7400)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_7400_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_7410)
- mflr r4
+ mflr r5
bl __init_fpu_registers
bl setup_7410_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
li r3,0
mtspr SPRN_L2CR2,r3
- mtlr r4
+ mtlr r5
blr
_GLOBAL(__setup_cpu_745x)
- mflr r4
+ mflr r5
bl setup_common_caches
bl setup_745x_specifics
- mtlr r4
+ mtlr r5
blr
/* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@ setup_750cx:
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
bnelr
- lwz r6,CPU_SPEC_FEATURES(r5)
+ lwz r6,CPU_SPEC_FEATURES(r4)
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
- stw r6,CPU_SPEC_FEATURES(r5)
+ stw r6,CPU_SPEC_FEATURES(r4)
blr
/* 750fx specific
@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
andis. r11,r11,L3CR_L3E@h
beq 1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
- lwz r6,CPU_SPEC_FEATURES(r5)
+ lwz r6,CPU_SPEC_FEATURES(r4)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
- stw r6,CPU_SPEC_FEATURES(r5)
+ stw r6,CPU_SPEC_FEATURES(r4)
1:
mfspr r11,SPRN_HID0
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8d74a24c550..e8e915ce3d8 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
* pointer on ppc64 and booke as we are running at 0 in real mode
* on ppc64 and reloc_offset is always 0 on booke.
*/
- if (s->cpu_setup) {
- s->cpu_setup(offset, s);
+ if (t->cpu_setup) {
+ t->cpu_setup(offset, t);
}
#endif /* CONFIG_PPC64 || CONFIG_BOOKE */
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bf5cb91f07d..fd481232957 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
dbg("removing cpu %lu from node %d\n", cpu, node);
if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
- cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
} else {
printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
cpu, node);
@@ -1289,10 +1289,9 @@ u64 memory_hotplug_max(void)
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-/* Vrtual Processor Home Node (VPHN) support */
+/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
-#define VPHN_NR_CHANGE_CTRS (8)
-static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
+static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static void set_topology_timer(void);
@@ -1303,16 +1302,18 @@ static void set_topology_timer(void);
*/
static void setup_cpu_associativity_change_counters(void)
{
- int cpu = 0;
+ int cpu;
+
+ /* The VPHN feature supports a maximum of 8 reference points */
+ BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
for_each_possible_cpu(cpu) {
- int i = 0;
+ int i;
u8 *counts = vphn_cpu_change_counts[cpu];
volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
- for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+ for (i = 0; i < distance_ref_points_depth; i++)
counts[i] = hypervisor_counts[i];
- }
}
}
@@ -1329,7 +1330,7 @@ static void setup_cpu_associativity_change_counters(void)
*/
static int update_cpu_associativity_changes_mask(void)
{
- int cpu = 0, nr_cpus = 0;
+ int cpu, nr_cpus = 0;
cpumask_t *changes = &cpu_associativity_changes_mask;
cpumask_clear(changes);
@@ -1339,8 +1340,8 @@ static int update_cpu_associativity_changes_mask(void)
u8 *counts = vphn_cpu_change_counts[cpu];
volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
- for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
- if (hypervisor_counts[i] > counts[i]) {
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ if (hypervisor_counts[i] != counts[i]) {
counts[i] = hypervisor_counts[i];
changed = 1;
}
@@ -1354,8 +1355,11 @@ static int update_cpu_associativity_changes_mask(void)
return nr_cpus;
}
-/* 6 64-bit registers unpacked into 12 32-bit associativity values */
-#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
+/*
+ * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
+ * the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
/*
* Convert the associativity domain numbers returned from the hypervisor
@@ -1363,15 +1367,14 @@ static int update_cpu_associativity_changes_mask(void)
*/
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
- int i = 0;
- int nr_assoc_doms = 0;
+ int i, nr_assoc_doms = 0;
const u16 *field = (const u16*) packed;
#define VPHN_FIELD_UNUSED (0xffff)
#define VPHN_FIELD_MSB (0x8000)
#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
- for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
+ for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
if (*field == VPHN_FIELD_UNUSED) {
/* All significant fields processed, and remaining
* fields contain the reserved value of all 1's.
@@ -1379,14 +1382,12 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
*/
unpacked[i] = *((u32*)field);
field += 2;
- }
- else if (*field & VPHN_FIELD_MSB) {
+ } else if (*field & VPHN_FIELD_MSB) {
/* Data is in the lower 15 bits of this field */
unpacked[i] = *field & VPHN_FIELD_MASK;
field++;
nr_assoc_doms++;
- }
- else {
+ } else {
/* Data is in the lower 15 bits of this field
* concatenated with the next 16 bit field
*/
@@ -1396,6 +1397,9 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
}
}
+ /* The first cell contains the length of the property */
+ unpacked[0] = nr_assoc_doms;
+
return nr_assoc_doms;
}
@@ -1405,7 +1409,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
*/
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
- long rc = 0;
+ long rc;
long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
u64 flags = 1;
int hwcpu = get_hard_smp_processor_id(cpu);
@@ -1419,7 +1423,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
static long vphn_get_associativity(unsigned long cpu,
unsigned int *associativity)
{
- long rc = 0;
+ long rc;
rc = hcall_vphn(cpu, associativity);
@@ -1445,9 +1449,9 @@ static long vphn_get_associativity(unsigned long cpu,
*/
int arch_update_cpu_topology(void)
{
- int cpu = 0, nid = 0, old_nid = 0;
+ int cpu, nid, old_nid;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
- struct sys_device *sysdev = NULL;
+ struct sys_device *sysdev;
for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
vphn_get_associativity(cpu, associativity);
@@ -1512,7 +1516,8 @@ int start_topology_update(void)
{
int rc = 0;
- if (firmware_has_feature(FW_FEATURE_VPHN)) {
+ if (firmware_has_feature(FW_FEATURE_VPHN) &&
+ get_lppaca()->shared_proc) {
vphn_enabled = 1;
setup_cpu_associativity_change_counters();
init_timer_deferrable(&topology_timer);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5d3ea9f60dd..ca5d5898d32 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page);
/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;
+/*
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this are spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
void hcall_tracepoint_regfunc(void)
{
hcall_tracepoint_refcount++;
@@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void)
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
+ unsigned long flags;
+ unsigned int *depth;
+
+ local_irq_save(flags);
+
+ depth = &__get_cpu_var(hcall_trace_depth);
+
+ if (*depth)
+ goto out;
+
+ (*depth)++;
trace_hcall_entry(opcode, args);
+ (*depth)--;
+
+out:
+ local_irq_restore(flags);
}
void __trace_hcall_exit(long opcode, unsigned long retval,
unsigned long *retbuf)
{
+ unsigned long flags;
+ unsigned int *depth;
+
+ local_irq_save(flags);
+
+ depth = &__get_cpu_var(hcall_trace_depth);
+
+ if (*depth)
+ goto out;
+
+ (*depth)++;
trace_hcall_exit(opcode, retval, retbuf);
+ (*depth)--;
+
+out:
+ local_irq_restore(flags);
}
#endif
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 4d9ebbab223..68d1537b8c8 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -12,10 +12,8 @@
#include <linux/cpumask.h>
#include <asm/segment.h>
#include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
#include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
#include "realmode/wakeup.h"
#include "sleep.h"
@@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void)
memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
}
+int __init acpi_configure_wakeup_memory(void)
+{
+ if (acpi_realmode)
+ set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+ return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
static int __init acpi_sleep_setup(char *str)
{