Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/smp.c                     7
-rw-r--r--  arch/arm/mach-integrator/platsmp.c        8
-rw-r--r--  arch/arm/mach-omap1/leds-h2p2-debug.c     1
-rw-r--r--  arch/arm/nwfpe/fpmodule.c                 1
-rw-r--r--  arch/arm/plat-omap/ocpi.c                 1
-rw-r--r--  arch/i386/mach-voyager/voyager_basic.c    1
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c      6
-rw-r--r--  arch/ppc/boot/images/Makefile             7
-rw-r--r--  arch/ppc64/kernel/cpu_setup_power4.S     38
-rw-r--r--  arch/ppc64/kernel/cputable.c             15
-rw-r--r--  arch/ppc64/kernel/iSeries_htab.c         51
-rw-r--r--  arch/ppc64/kernel/iSeries_setup.c        18
-rw-r--r--  arch/ppc64/kernel/pSeries_lpar.c         47
-rw-r--r--  arch/ppc64/mm/hash_low.S                  8
-rw-r--r--  arch/ppc64/mm/hash_native.c             129
-rw-r--r--  arch/ppc64/mm/hash_utils.c               16
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c              16
-rw-r--r--  arch/ppc64/mm/init.c                      7
-rw-r--r--  arch/s390/kernel/compat_linux.c          38
-rw-r--r--  arch/s390/kernel/compat_wrapper.S         4
20 files changed, 227 insertions, 192 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index a931409c8fe..7ae45c3fc83 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -36,7 +36,7 @@
* The present bitmask indicates that the CPU is physically present.
* The online bitmask indicates that the CPU is up and running.
*/
-cpumask_t cpu_present_mask;
+cpumask_t cpu_possible_map;
cpumask_t cpu_online_map;
/*
@@ -235,7 +235,8 @@ void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
- cpu_set(cpu, cpu_present_mask);
+ cpu_set(cpu, cpu_possible_map);
+ cpu_set(cpu, cpu_present_map);
cpu_set(cpu, cpu_online_map);
}
@@ -355,7 +356,7 @@ void show_ipi_list(struct seq_file *p)
seq_puts(p, "IPI:");
- for_each_online_cpu(cpu)
+ for_each_present_cpu(cpu)
seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
seq_putc(p, '\n');
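The distinction being repaired in these ARM hunks: cpu_possible_map is the fixed set of CPUs that may ever exist, cpu_present_map the CPUs physically populated, and cpu_online_map the CPUs actually up and scheduling. A minimal sketch of how generic code consumes the three masks (illustration only, assuming the cpumask iterators of this kernel era):

    /* Walk the three cpumask levels touched by this patch. */
    #include <linux/cpumask.h>
    #include <linux/kernel.h>

    static void dump_cpu_masks(void)
    {
            int cpu;

            for_each_cpu_mask(cpu, cpu_possible_map)   /* may ever exist */
                    printk("possible: %d\n", cpu);
            for_each_present_cpu(cpu)                  /* populated now */
                    printk("present:  %d\n", cpu);
            for_each_online_cpu(cpu)                   /* up and running */
                    printk("online:   %d\n", cpu);
    }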
diff --git a/arch/arm/mach-integrator/platsmp.c b/arch/arm/mach-integrator/platsmp.c
index ead15dfcb53..2ba02577709 100644
--- a/arch/arm/mach-integrator/platsmp.c
+++ b/arch/arm/mach-integrator/platsmp.c
@@ -174,11 +174,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
max_cpus = ncores;
/*
- * Initialise the present mask - this tells us which CPUs should
- * be present.
+ * Initialise the possible/present maps.
+ * cpu_possible_map describes the set of CPUs which may be present
+ * cpu_present_map describes the set of CPUs populated
*/
for (i = 0; i < max_cpus; i++) {
- cpu_set(i, cpu_present_mask);
+ cpu_set(i, cpu_possible_map);
+ cpu_set(i, cpu_present_map);
}
/*
diff --git a/arch/arm/mach-omap1/leds-h2p2-debug.c b/arch/arm/mach-omap1/leds-h2p2-debug.c
index 6e98290cca5..ec0d8285f24 100644
--- a/arch/arm/mach-omap1/leds-h2p2-debug.c
+++ b/arch/arm/mach-omap1/leds-h2p2-debug.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
-#include <linux/version.h>
#include <asm/io.h>
#include <asm/hardware.h>
diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c
index a806fea5c3e..a8efcf34888 100644
--- a/arch/arm/nwfpe/fpmodule.c
+++ b/arch/arm/nwfpe/fpmodule.c
@@ -24,7 +24,6 @@
#include "fpa11.h"
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/config.h>
/* XXX */
diff --git a/arch/arm/plat-omap/ocpi.c b/arch/arm/plat-omap/ocpi.c
index 1fb16f9edfd..2ede2ee8cae 100644
--- a/arch/arm/plat-omap/ocpi.c
+++ b/arch/arm/plat-omap/ocpi.c
@@ -25,7 +25,6 @@
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/arch/i386/mach-voyager/voyager_basic.c b/arch/i386/mach-voyager/voyager_basic.c
index 3e439ce5e1b..8680080a6a8 100644
--- a/arch/i386/mach-voyager/voyager_basic.c
+++ b/arch/i386/mach-voyager/voyager_basic.c
@@ -36,6 +36,7 @@
* Power off function, if any
*/
void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
int voyager_level = 0;
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 8c8527593da..0e1f4208b07 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -10,6 +10,7 @@
* the voyager hal to provide the functionality
*/
#include <linux/config.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
@@ -40,6 +41,7 @@ static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR
/* per CPU data structure (for /proc/cpuinfo et al), visible externally
* indexed physically */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_data);
/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;
@@ -72,6 +74,7 @@ static volatile unsigned long smp_invalidate_needed;
/* Bitmask of currently online CPUs - used by setup.c for
/proc/cpuinfo, visible externally but still physical */
cpumask_t cpu_online_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_online_map);
/* Bitmask of CPUs present in the system - exported by i386_syms.c, used
* by scheduler but indexed physically */
@@ -238,6 +241,7 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
/* This is for the new dynamic CPU boot code */
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_callout_map);
/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -978,6 +982,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_page);
/* enable the requested IRQs */
static void
@@ -1109,6 +1114,7 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
return 0;
}
+EXPORT_SYMBOL(smp_call_function);
/* Sorry about the name. In an APIC based system, the APICs
* themselves are programmed to send a timer interrupt. This is used
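The voyager hunks are plain symbol exports: once cpu_data, cpu_online_map, cpu_callout_map, flush_tlb_page and smp_call_function carry EXPORT_SYMBOL, modular code can link against them. A hypothetical module using one of the newly exported symbols (sketch only, names ours, against the four-argument smp_call_function of this era):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/smp.h>

    static void say_hello(void *info)
    {
            printk(KERN_INFO "hello from cpu %d\n", smp_processor_id());
    }

    static int __init hello_init(void)
    {
            /* broadcast to the other online CPUs, wait for completion */
            smp_call_function(say_hello, NULL, 0, 1);
            return 0;
    }

    static void __exit hello_exit(void)
    {
    }

    module_init(hello_init);
    module_exit(hello_exit);
    MODULE_LICENSE("GPL");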
diff --git a/arch/ppc/boot/images/Makefile b/arch/ppc/boot/images/Makefile
index c9ac5f5fa9e..532e7ef1edb 100644
--- a/arch/ppc/boot/images/Makefile
+++ b/arch/ppc/boot/images/Makefile
@@ -6,12 +6,17 @@ MKIMAGE := $(srctree)/scripts/mkuboot.sh
extra-y := vmlinux.bin vmlinux.gz
+# two make processes may write to vmlinux.gz at the same time with make -j
+quiet_cmd_mygzip = GZIP $@
+cmd_mygzip = gzip -f -9 < $< > $@.$$$$ && mv $@.$$$$ $@
+
+
OBJCOPYFLAGS_vmlinux.bin := -O binary
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
- $(call if_changed,gzip)
+ $(call if_changed,mygzip)
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A ppc -O linux -T kernel \
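The mygzip rule exists because parallel make could run two recipes that both write vmlinux.gz directly; each writer now emits a private $@.$$$$ file ($$$$ expands to the shell's PID) and renames it over the target, and rename within one filesystem is atomic. The same publish-by-rename idiom in C, as a rough illustration (not from the patch):

    #include <stdio.h>
    #include <unistd.h>

    /* Write data to <final>.<pid>, then atomically rename into place,
     * so a concurrent writer never exposes a half-written file. */
    static int publish(const char *final, const char *data, size_t len)
    {
            char tmp[256];
            FILE *f;

            snprintf(tmp, sizeof(tmp), "%s.%d", final, getpid());
            f = fopen(tmp, "w");
            if (!f)
                    return -1;
            fwrite(data, 1, len, f);
            fclose(f);
            return rename(tmp, final);
    }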
diff --git a/arch/ppc64/kernel/cpu_setup_power4.S b/arch/ppc64/kernel/cpu_setup_power4.S
index 42fc08cf87a..0482c063c26 100644
--- a/arch/ppc64/kernel/cpu_setup_power4.S
+++ b/arch/ppc64/kernel/cpu_setup_power4.S
@@ -31,10 +31,13 @@ _GLOBAL(__970_cpu_preinit)
*/
mfspr r0,SPRN_PVR
srwi r0,r0,16
- cmpwi cr0,r0,0x39
- cmpwi cr1,r0,0x3c
- cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
+ cmpwi r0,0x39
+ beq 1f
+ cmpwi r0,0x3c
+ beq 1f
+ cmpwi r0,0x44
bnelr
+1:
/* Make sure HID4:rm_ci is off before MMU is turned off, that large
* pages are enabled with HID4:61 and clear HID5:DCBZ_size and
@@ -133,12 +136,14 @@ _GLOBAL(__save_cpu_setup)
/* We only deal with 970 for now */
mfspr r0,SPRN_PVR
srwi r0,r0,16
- cmpwi cr0,r0,0x39
- cmpwi cr1,r0,0x3c
- cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
- bne 1f
-
- /* Save HID0,1,4 and 5 */
+ cmpwi r0,0x39
+ beq 1f
+ cmpwi r0,0x3c
+ beq 1f
+ cmpwi r0,0x44
+ bne 2f
+
+1: /* Save HID0,1,4 and 5 */
mfspr r3,SPRN_HID0
std r3,CS_HID0(r5)
mfspr r3,SPRN_HID1
@@ -148,7 +153,7 @@ _GLOBAL(__save_cpu_setup)
mfspr r3,SPRN_HID5
std r3,CS_HID5(r5)
-1:
+2:
mtcr r7
blr
@@ -165,12 +170,14 @@ _GLOBAL(__restore_cpu_setup)
/* We only deal with 970 for now */
mfspr r0,SPRN_PVR
srwi r0,r0,16
- cmpwi cr0,r0,0x39
- cmpwi cr1,r0,0x3c
- cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
- bne 1f
+ cmpwi r0,0x39
+ beq 1f
+ cmpwi r0,0x3c
+ beq 1f
+ cmpwi r0,0x44
+ bnelr
- /* Before accessing memory, we make sure rm_ci is clear */
+1: /* Before accessing memory, we make sure rm_ci is clear */
li r0,0
mfspr r3,SPRN_HID4
rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
@@ -223,6 +230,5 @@ _GLOBAL(__restore_cpu_setup)
mtspr SPRN_HID5,r3
sync
isync
-1:
blr
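All three routines now share one test: read the PVR, keep the top halfword, and accept 0x39 (970), 0x3c (970FX) or the newly added 0x44 (970MP); the old cmpwi/cror pairing could only fold two values into a single condition. The same check rendered in C, assuming SPR number 287 for the PVR:

    /* C rendering of the PVR version test in the assembly above. */
    static inline unsigned int pvr_version(void)
    {
            unsigned long pvr;

            asm volatile("mfspr %0,287" : "=r" (pvr));  /* SPRN_PVR */
            return pvr >> 16;
    }

    static int is_970_family(void)
    {
            unsigned int ver = pvr_version();

            return ver == 0x39 || ver == 0x3c || ver == 0x44;
    }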
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
index 8d4c46f6f0b..77cec42f952 100644
--- a/arch/ppc64/kernel/cputable.c
+++ b/arch/ppc64/kernel/cputable.c
@@ -183,6 +183,21 @@ struct cpu_spec cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.firmware_features = COMMON_PPC64_FW,
},
+ { /* PPC970MP */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00440000,
+ .cpu_name = "PPC970MP",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
+ .cpu_user_features = COMMON_USER_PPC64 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_ppc970,
+ .firmware_features = COMMON_PPC64_FW,
+ },
{ /* Power5 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003a0000,
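The ppc64 memory-management hunks that follow are one mechanical conversion: the old HPTE type with nested bitfields (hpte.dw0.dw0.v and friends) becomes a flat two-dword hpte_t manipulated with masks and shifts, and the hpte_insert implementations take vflags/rflags words instead of separate secondary/bolted/large ints. The patch presumes header definitions roughly like the following, reconstructed from how the new code uses them (our reading, not quoted from the diff):

    /* Assumed shape of the new HPTE type and flag constants. */
    typedef struct {
            unsigned long v;   /* dword 0: AVPN plus status bits */
            unsigned long r;   /* dword 1: RPN plus protection bits */
    } hpte_t;

    #define HPTE_V_AVPN_SHIFT  7
    #define HPTE_V_AVPN_VAL(x) (((x) & ~0x7fUL) >> HPTE_V_AVPN_SHIFT)
    #define HPTE_V_BOLTED      0x0000000000000010UL
    #define HPTE_V_LOCK        0x0000000000000008UL  /* software lock bit */
    #define HPTE_V_LARGE       0x0000000000000004UL
    #define HPTE_V_SECONDARY   0x0000000000000002UL
    #define HPTE_V_VALID       0x0000000000000001UL

    #define HPTE_R_RPN_SHIFT   12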
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/ppc64/kernel/iSeries_htab.c
index aa9e8fdd1a4..b0250ae4a72 100644
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ b/arch/ppc64/kernel/iSeries_htab.c
@@ -38,11 +38,12 @@ static inline void iSeries_hunlock(unsigned long slot)
}
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
- unsigned long prpn, int secondary,
- unsigned long hpteflags, int bolted, int large)
+ unsigned long prpn, unsigned long vflags,
+ unsigned long rflags)
{
long slot;
- HPTE lhpte;
+ hpte_t lhpte;
+ int secondary = 0;
/*
* The hypervisor tries both primary and secondary.
@@ -50,13 +51,13 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
* it means we have already tried both primary and secondary,
* so we return failure immediately.
*/
- if (secondary)
+ if (vflags & HPTE_V_SECONDARY)
return -1;
iSeries_hlock(hpte_group);
slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
- BUG_ON(lhpte.dw0.dw0.v);
+ BUG_ON(lhpte.v & HPTE_V_VALID);
if (slot == -1) { /* No available entry found in either group */
iSeries_hunlock(hpte_group);
@@ -64,19 +65,13 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
}
if (slot < 0) { /* MSB set means secondary group */
+ vflags |= HPTE_V_SECONDARY;
secondary = 1;
slot &= 0x7fffffffffffffff;
}
- lhpte.dw1.dword1 = 0;
- lhpte.dw1.dw1.rpn = physRpn_to_absRpn(prpn);
- lhpte.dw1.flags.flags = hpteflags;
-
- lhpte.dw0.dword0 = 0;
- lhpte.dw0.dw0.avpn = va >> 23;
- lhpte.dw0.dw0.h = secondary;
- lhpte.dw0.dw0.bolted = bolted;
- lhpte.dw0.dw0.v = 1;
+ lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
+ lhpte.r = (physRpn_to_absRpn(prpn) << HPTE_R_RPN_SHIFT) | rflags;
/* Now fill in the actual HPTE */
HvCallHpt_addValidate(slot, secondary, &lhpte);
@@ -88,20 +83,17 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
static unsigned long iSeries_hpte_getword0(unsigned long slot)
{
- unsigned long dword0;
- HPTE hpte;
+ hpte_t hpte;
HvCallHpt_get(&hpte, slot);
- dword0 = hpte.dw0.dword0;
-
- return dword0;
+ return hpte.v;
}
static long iSeries_hpte_remove(unsigned long hpte_group)
{
unsigned long slot_offset;
int i;
- HPTE lhpte;
+ unsigned long hpte_v;
/* Pick a random slot to start at */
slot_offset = mftb() & 0x7;
@@ -109,10 +101,9 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
iSeries_hlock(hpte_group);
for (i = 0; i < HPTES_PER_GROUP; i++) {
- lhpte.dw0.dword0 =
- iSeries_hpte_getword0(hpte_group + slot_offset);
+ hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
- if (!lhpte.dw0.dw0.bolted) {
+ if (! (hpte_v & HPTE_V_BOLTED)) {
HvCallHpt_invalidateSetSwBitsGet(hpte_group +
slot_offset, 0, 0);
iSeries_hunlock(hpte_group);
@@ -137,13 +128,13 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int large, int local)
{
- HPTE hpte;
+ hpte_t hpte;
unsigned long avpn = va >> 23;
iSeries_hlock(slot);
HvCallHpt_get(&hpte, slot);
- if ((hpte.dw0.dw0.avpn == avpn) && (hpte.dw0.dw0.v)) {
+ if ((HPTE_V_AVPN_VAL(hpte.v) == avpn) && (hpte.v & HPTE_V_VALID)) {
/*
* Hypervisor expects bits as NPPP, which is
* different from how they are mapped in our PP.
@@ -167,7 +158,7 @@ static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
*/
static long iSeries_hpte_find(unsigned long vpn)
{
- HPTE hpte;
+ hpte_t hpte;
long slot;
/*
@@ -177,7 +168,7 @@ static long iSeries_hpte_find(unsigned long vpn)
* 0x80000000xxxxxxxx : Entry found in secondary group, slot x
*/
slot = HvCallHpt_findValid(&hpte, vpn);
- if (hpte.dw0.dw0.v) {
+ if (hpte.v & HPTE_V_VALID) {
if (slot < 0) {
slot &= 0x7fffffffffffffff;
slot = -slot;
@@ -212,7 +203,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
int large, int local)
{
- HPTE lhpte;
+ unsigned long hpte_v;
unsigned long avpn = va >> 23;
unsigned long flags;
@@ -220,9 +211,9 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
iSeries_hlock(slot);
- lhpte.dw0.dword0 = iSeries_hpte_getword0(slot);
+ hpte_v = iSeries_hpte_getword0(slot);
- if ((lhpte.dw0.dw0.avpn == avpn) && lhpte.dw0.dw0.v)
+ if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
iSeries_hunlock(slot);
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index 077c82fc9f3..a649edbb23b 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -503,7 +503,7 @@ static void __init build_iSeries_Memory_Map(void)
/* Fill in the hashed page table hash mask */
num_ptegs = hptSizePages *
- (PAGE_SIZE / (sizeof(HPTE) * HPTES_PER_GROUP));
+ (PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
htab_hash_mask = num_ptegs - 1;
/*
@@ -618,25 +618,23 @@ static void __init setup_iSeries_cache_sizes(void)
static void iSeries_make_pte(unsigned long va, unsigned long pa,
int mode)
{
- HPTE local_hpte, rhpte;
+ hpte_t local_hpte, rhpte;
unsigned long hash, vpn;
long slot;
vpn = va >> PAGE_SHIFT;
hash = hpt_hash(vpn, 0);
- local_hpte.dw1.dword1 = pa | mode;
- local_hpte.dw0.dword0 = 0;
- local_hpte.dw0.dw0.avpn = va >> 23;
- local_hpte.dw0.dw0.bolted = 1; /* bolted */
- local_hpte.dw0.dw0.v = 1;
+ local_hpte.r = pa | mode;
+ local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
+ | HPTE_V_BOLTED | HPTE_V_VALID;
slot = HvCallHpt_findValid(&rhpte, vpn);
if (slot < 0) {
/* Must find space in primary group */
panic("hash_page: hpte already exists\n");
}
- HvCallHpt_addValidate(slot, 0, (HPTE *)&local_hpte );
+ HvCallHpt_addValidate(slot, 0, &local_hpte);
}
/*
@@ -646,7 +644,7 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
{
unsigned long pa;
unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
- HPTE hpte;
+ hpte_t hpte;
for (pa = saddr; pa < eaddr ;pa += PAGE_SIZE) {
unsigned long ea = (unsigned long)__va(pa);
@@ -659,7 +657,7 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
if (!in_kernel_text(ea))
mode_rw |= HW_NO_EXEC;
- if (hpte.dw0.dw0.v) {
+ if (hpte.v & HPTE_V_VALID) {
/* HPTE exists, so just bolt it */
HvCallHpt_setSwBits(slot, 0x10, 0);
/* And make sure the pp bits are correct */
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/ppc64/kernel/pSeries_lpar.c
index 6534812db43..74dd144dcce 100644
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ b/arch/ppc64/kernel/pSeries_lpar.c
@@ -277,31 +277,20 @@ void vpa_init(int cpu)
long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long prpn,
- int secondary, unsigned long hpteflags,
- int bolted, int large)
+ unsigned long vflags, unsigned long rflags)
{
unsigned long arpn = physRpn_to_absRpn(prpn);
unsigned long lpar_rc;
unsigned long flags;
unsigned long slot;
- HPTE lhpte;
+ unsigned long hpte_v, hpte_r;
unsigned long dummy0, dummy1;
- /* Fill in the local HPTE with absolute rpn, avpn and flags */
- lhpte.dw1.dword1 = 0;
- lhpte.dw1.dw1.rpn = arpn;
- lhpte.dw1.flags.flags = hpteflags;
+ hpte_v = ((va >> 23) << HPTE_V_AVPN_SHIFT) | vflags | HPTE_V_VALID;
+ if (vflags & HPTE_V_LARGE)
+ hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT);
- lhpte.dw0.dword0 = 0;
- lhpte.dw0.dw0.avpn = va >> 23;
- lhpte.dw0.dw0.h = secondary;
- lhpte.dw0.dw0.bolted = bolted;
- lhpte.dw0.dw0.v = 1;
-
- if (large) {
- lhpte.dw0.dw0.l = 1;
- lhpte.dw0.dw0.avpn &= ~0x1UL;
- }
+ hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
/* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */
@@ -312,11 +301,11 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
flags = 0;
/* XXX why is this here? - Anton */
- if (hpteflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
- lhpte.dw1.flags.flags &= ~_PAGE_COHERENT;
+ if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
+ hpte_r &= ~_PAGE_COHERENT;
- lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, lhpte.dw0.dword0,
- lhpte.dw1.dword1, &slot, &dummy0, &dummy1);
+ lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
+ hpte_r, &slot, &dummy0, &dummy1);
if (unlikely(lpar_rc == H_PTEG_Full))
return -1;
@@ -332,7 +321,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* Because of iSeries, we have to pass down the secondary
* bucket bit here as well
*/
- return (slot & 7) | (secondary << 3);
+ return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);
@@ -427,22 +416,18 @@ static long pSeries_lpar_hpte_find(unsigned long vpn)
unsigned long hash;
unsigned long i, j;
long slot;
- union {
- unsigned long dword0;
- Hpte_dword0 dw0;
- } hpte_dw0;
- Hpte_dword0 dw0;
+ unsigned long hpte_v;
hash = hpt_hash(vpn, 0);
for (j = 0; j < 2; j++) {
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
- hpte_dw0.dword0 = pSeries_lpar_hpte_getword0(slot);
- dw0 = hpte_dw0.dw0;
+ hpte_v = pSeries_lpar_hpte_getword0(slot);
- if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
- (dw0.h == j)) {
+ if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
+ && (hpte_v & HPTE_V_VALID)
+ && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
/* HPTE matches */
if (j)
slot = -slot;
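Note the v-dword encoding that pSeries_lpar_hpte_insert above and native_hpte_insert later in this diff both perform: the AVPN derived from the virtual address, the caller's vflags, the valid bit, and for large pages the lowest AVPN bit cleared because the hardware reuses it. Factored into a helper for clarity (our sketch, using the constants assumed earlier):

    static inline unsigned long make_hpte_v(unsigned long va,
                                            unsigned long vflags)
    {
            unsigned long v;

            v = ((va >> 23) << HPTE_V_AVPN_SHIFT) | vflags | HPTE_V_VALID;
            if (vflags & HPTE_V_LARGE)
                    /* low AVPN bit is repurposed for large pages */
                    v &= ~(1UL << HPTE_V_AVPN_SHIFT);
            return v;
    }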
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
index c23d46956dd..fbff24827ae 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/ppc64/mm/hash_low.S
@@ -170,9 +170,7 @@ htab_insert_pte:
/* Call ppc_md.hpte_insert */
ld r7,STK_PARM(r4)(r1) /* Retreive new pp bits */
mr r4,r29 /* Retreive va */
- li r6,0 /* primary slot */
- li r8,0 /* not bolted and not large */
- li r9,0
+ li r6,0 /* no vflags */
_GLOBAL(htab_call_hpte_insert1)
bl . /* Will be patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -192,9 +190,7 @@ _GLOBAL(htab_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r7,STK_PARM(r4)(r1) /* Retreive new pp bits */
mr r4,r29 /* Retreive va */
- li r6,1 /* secondary slot */
- li r8,0 /* not bolted and not large */
- li r9,0
+ li r6,HPTE_V_SECONDARY@l /* secondary slot */
_GLOBAL(htab_call_hpte_insert2)
bl . /* Will be patched by htab_finish_init() */
cmpdi 0,r3,0
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 4fec05817d6..a6abd3a979b 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -27,9 +27,9 @@
static DEFINE_SPINLOCK(native_tlbie_lock);
-static inline void native_lock_hpte(HPTE *hptep)
+static inline void native_lock_hpte(hpte_t *hptep)
{
- unsigned long *word = &hptep->dw0.dword0;
+ unsigned long *word = &hptep->v;
while (1) {
if (!test_and_set_bit(HPTE_LOCK_BIT, word))
@@ -39,32 +39,28 @@ static inline void native_lock_hpte(HPTE *hptep)
}
}
-static inline void native_unlock_hpte(HPTE *hptep)
+static inline void native_unlock_hpte(hpte_t *hptep)
{
- unsigned long *word = &hptep->dw0.dword0;
+ unsigned long *word = &hptep->v;
asm volatile("lwsync":::"memory");
clear_bit(HPTE_LOCK_BIT, word);
}
long native_hpte_insert(unsigned long hpte_group, unsigned long va,
- unsigned long prpn, int secondary,
- unsigned long hpteflags, int bolted, int large)
+ unsigned long prpn, unsigned long vflags,
+ unsigned long rflags)
{
unsigned long arpn = physRpn_to_absRpn(prpn);
- HPTE *hptep = htab_address + hpte_group;
- Hpte_dword0 dw0;
- HPTE lhpte;
+ hpte_t *hptep = htab_address + hpte_group;
+ unsigned long hpte_v, hpte_r;
int i;
for (i = 0; i < HPTES_PER_GROUP; i++) {
- dw0 = hptep->dw0.dw0;
-
- if (!dw0.v) {
+ if (! (hptep->v & HPTE_V_VALID)) {
/* retry with lock held */
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
- if (!dw0.v)
+ if (! (hptep->v & HPTE_V_VALID))
break;
native_unlock_hpte(hptep);
}
@@ -75,56 +71,45 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va,
if (i == HPTES_PER_GROUP)
return -1;
- lhpte.dw1.dword1 = 0;
- lhpte.dw1.dw1.rpn = arpn;
- lhpte.dw1.flags.flags = hpteflags;
-
- lhpte.dw0.dword0 = 0;
- lhpte.dw0.dw0.avpn = va >> 23;
- lhpte.dw0.dw0.h = secondary;
- lhpte.dw0.dw0.bolted = bolted;
- lhpte.dw0.dw0.v = 1;
-
- if (large) {
- lhpte.dw0.dw0.l = 1;
- lhpte.dw0.dw0.avpn &= ~0x1UL;
- }
-
- hptep->dw1.dword1 = lhpte.dw1.dword1;
+ hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
+ if (vflags & HPTE_V_LARGE)
+ hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT);
+ hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
+ hptep->r = hpte_r;
/* Guarantee the second dword is visible before the valid bit */
__asm__ __volatile__ ("eieio" : : : "memory");
-
/*
* Now set the first dword including the valid bit
* NOTE: this also unlocks the hpte
*/
- hptep->dw0.dword0 = lhpte.dw0.dword0;
+ hptep->v = hpte_v;
__asm__ __volatile__ ("ptesync" : : : "memory");
- return i | (secondary << 3);
+ return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
static long native_hpte_remove(unsigned long hpte_group)
{
- HPTE *hptep;
- Hpte_dword0 dw0;
+ hpte_t *hptep;
int i;
int slot_offset;
+ unsigned long hpte_v;
/* pick a random entry to start at */
slot_offset = mftb() & 0x7;
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + hpte_group + slot_offset;
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
- if (dw0.v && !dw0.bolted) {
+ if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
/* retry with lock held */
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
- if (dw0.v && !dw0.bolted)
+ hpte_v = hptep->v;
+ if ((hpte_v & HPTE_V_VALID)
+ && !(hpte_v & HPTE_V_BOLTED))
break;
native_unlock_hpte(hptep);
}
@@ -137,15 +122,15 @@ static long native_hpte_remove(unsigned long hpte_group)
return -1;
/* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->dw0.dword0 = 0;
+ hptep->v = 0;
return i;
}
-static inline void set_pp_bit(unsigned long pp, HPTE *addr)
+static inline void set_pp_bit(unsigned long pp, hpte_t *addr)
{
unsigned long old;
- unsigned long *p = &addr->dw1.dword1;
+ unsigned long *p = &addr->r;
__asm__ __volatile__(
"1: ldarx %0,0,%3\n\
@@ -163,11 +148,11 @@ static inline void set_pp_bit(unsigned long pp, HPTE *addr)
*/
static long native_hpte_find(unsigned long vpn)
{
- HPTE *hptep;
+ hpte_t *hptep;
unsigned long hash;
unsigned long i, j;
long slot;
- Hpte_dword0 dw0;
+ unsigned long hpte_v;
hash = hpt_hash(vpn, 0);
@@ -175,10 +160,11 @@ static long native_hpte_find(unsigned long vpn)
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + slot;
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
- if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
- (dw0.h == j)) {
+ if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
+ && (hpte_v & HPTE_V_VALID)
+ && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
/* HPTE matches */
if (j)
slot = -slot;
@@ -195,20 +181,21 @@ static long native_hpte_find(unsigned long vpn)
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int large, int local)
{
- HPTE *hptep = htab_address + slot;
- Hpte_dword0 dw0;
+ hpte_t *hptep = htab_address + slot;
+ unsigned long hpte_v;
unsigned long avpn = va >> 23;
int ret = 0;
if (large)
- avpn &= ~0x1UL;
+ avpn &= ~1;
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */
- if ((dw0.avpn != avpn) || !dw0.v) {
+ if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
+ || !(hpte_v & HPTE_V_VALID)) {
native_unlock_hpte(hptep);
ret = -1;
} else {
@@ -244,7 +231,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
unsigned long vsid, va, vpn, flags = 0;
long slot;
- HPTE *hptep;
+ hpte_t *hptep;
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
vsid = get_kernel_vsid(ea);
@@ -269,26 +256,27 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
int large, int local)
{
- HPTE *hptep = htab_address + slot;
- Hpte_dword0 dw0;
+ hpte_t *hptep = htab_address + slot;
+ unsigned long hpte_v;
unsigned long avpn = va >> 23;
unsigned long flags;
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
if (large)
- avpn &= ~0x1UL;
+ avpn &= ~1;
local_irq_save(flags);
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */
- if ((dw0.avpn != avpn) || !dw0.v) {
+ if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
+ || !(hpte_v & HPTE_V_VALID)) {
native_unlock_hpte(hptep);
} else {
/* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->dw0.dword0 = 0;
+ hptep->v = 0;
}
/* Invalidate the tlb */
@@ -315,8 +303,8 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
static void native_hpte_clear(void)
{
unsigned long slot, slots, flags;
- HPTE *hptep = htab_address;
- Hpte_dword0 dw0;
+ hpte_t *hptep = htab_address;
+ unsigned long hpte_v;
unsigned long pteg_count;
pteg_count = htab_hash_mask + 1;
@@ -336,11 +324,11 @@ static void native_hpte_clear(void)
* running, right? and for crash dump, we probably
* don't want to wait for a maybe bad cpu.
*/
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
- if (dw0.v) {
- hptep->dw0.dword0 = 0;
- tlbie(slot2va(dw0.avpn, dw0.l, dw0.h, slot), dw0.l);
+ if (hpte_v & HPTE_V_VALID) {
+ hptep->v = 0;
+ tlbie(slot2va(hpte_v, slot), hpte_v & HPTE_V_LARGE);
}
}
@@ -353,8 +341,8 @@ static void native_flush_hash_range(unsigned long context,
{
unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
int i, j;
- HPTE *hptep;
- Hpte_dword0 dw0;
+ hpte_t *hptep;
+ unsigned long hpte_v;
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
/* XXX fix for large ptes */
@@ -390,14 +378,15 @@ static void native_flush_hash_range(unsigned long context,
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */
- if ((dw0.avpn != avpn) || !dw0.v) {
+ if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
+ || !(hpte_v & HPTE_V_VALID)) {
native_unlock_hpte(hptep);
} else {
/* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->dw0.dword0 = 0;
+ hptep->v = 0;
}
j++;
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 1647b1c6f28..623b5d130c3 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -75,8 +75,8 @@
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */
-HPTE *htab_address;
-unsigned long htab_hash_mask;
+hpte_t *htab_address;
+unsigned long htab_hash_mask;
extern unsigned long _SDR1;
@@ -97,11 +97,15 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
unsigned long addr;
unsigned int step;
unsigned long tmp_mode;
+ unsigned long vflags;
- if (large)
+ if (large) {
step = 16*MB;
- else
+ vflags = HPTE_V_BOLTED | HPTE_V_LARGE;
+ } else {
step = 4*KB;
+ vflags = HPTE_V_BOLTED;
+ }
for (addr = start; addr < end; addr += step) {
unsigned long vpn, hash, hpteg;
@@ -129,12 +133,12 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
if (systemcfg->platform & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
- 0, tmp_mode, 1, large);
+ vflags, tmp_mode);
else
#endif /* CONFIG_PPC_PSERIES */
ret = native_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
- 0, tmp_mode, 1, large);
+ vflags, tmp_mode);
if (ret == -1) {
ppc64_terminate_msg(0x20, "create_pte_mapping");
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index fdcfe97c75c..f9524602818 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -583,7 +583,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
pte_t *ptep;
unsigned long va, vpn;
pte_t old_pte, new_pte;
- unsigned long hpteflags, prpn;
+ unsigned long rflags, prpn;
long slot;
int err = 1;
@@ -626,9 +626,9 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
old_pte = *ptep;
new_pte = old_pte;
- hpteflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
+ rflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
- hpteflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);
+ rflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);
/* Check if pte already has an hpte (case 2) */
if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
@@ -641,7 +641,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;
- if (ppc_md.hpte_updatepp(slot, hpteflags, va, 1, local) == -1)
+ if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
}
@@ -661,10 +661,10 @@ repeat:
/* Add in WIMG bits */
/* XXX We should store these in the pte */
- hpteflags |= _PAGE_COHERENT;
+ rflags |= _PAGE_COHERENT;
- slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
- hpteflags, 0, 1);
+ slot = ppc_md.hpte_insert(hpte_group, va, prpn,
+ HPTE_V_LARGE, rflags);
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
@@ -672,7 +672,7 @@ repeat:
hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, va, prpn,
- 1, hpteflags, 0, 1);
+ HPTE_V_LARGE | HPTE_V_SECONDARY, rflags);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index b50b3a446db..e58a24d4287 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -180,9 +180,10 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* Panic if a pte grpup is full */
- if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
- _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
- 1, 0) == -1) {
+ if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
+ HPTE_V_BOLTED,
+ _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
+ == -1) {
panic("map_io_page: could not insert mapping");
}
}
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 61405622287..18610cea03a 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -58,6 +58,7 @@
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
+#include <linux/fadvise.h>
#include <asm/types.h>
#include <asm/ipc.h>
@@ -1043,3 +1044,40 @@ sys32_timer_create(clockid_t which_clock, struct compat_sigevent *se32,
return ret;
}
+
+/*
+ * 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
+ * These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
+ * because the 31 bit values differ from the 64 bit values.
+ */
+
+asmlinkage long
+sys32_fadvise64(int fd, loff_t offset, size_t len, int advise)
+{
+ if (advise == 4)
+ advise = POSIX_FADV_DONTNEED;
+ else if (advise == 5)
+ advise = POSIX_FADV_NOREUSE;
+ return sys_fadvise64(fd, offset, len, advise);
+}
+
+struct fadvise64_64_args {
+ int fd;
+ long long offset;
+ long long len;
+ int advice;
+};
+
+asmlinkage long
+sys32_fadvise64_64(struct fadvise64_64_args __user *args)
+{
+ struct fadvise64_64_args a;
+
+ if ( copy_from_user(&a, args, sizeof(a)) )
+ return -EFAULT;
+ if (a.advice == 4)
+ a.advice = POSIX_FADV_DONTNEED;
+ else if (a.advice == 5)
+ a.advice = POSIX_FADV_NOREUSE;
+ return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
+}
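The wrappers are needed because the advice constants diverge between ABIs: 31-bit s390 uses the generic POSIX_FADV_DONTNEED=4 and POSIX_FADV_NOREUSE=5, while the 64-bit values differ (6 and 7, to the best of our reading), so a 31-bit caller's 4 and 5 must be translated before reaching the common fadvise code. A userspace check that makes the skew visible when built 31-bit versus 64-bit:

    #include <fcntl.h>
    #include <stdio.h>

    /* Prints this ABI's advice values; compare -m31 and -m64 output. */
    int main(void)
    {
            printf("POSIX_FADV_DONTNEED = %d\n", POSIX_FADV_DONTNEED);
            printf("POSIX_FADV_NOREUSE  = %d\n", POSIX_FADV_NOREUSE);
            return 0;
    }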
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index bf529739c8a..799a98eac92 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1251,12 +1251,12 @@ sys32_fadvise64_wrapper:
or %r3,%r4 # get low word of 64bit loff_t
llgfr %r4,%r5 # size_t (unsigned long)
lgfr %r5,%r6 # int
- jg sys_fadvise64
+ jg sys32_fadvise64
.globl sys32_fadvise64_64_wrapper
sys32_fadvise64_64_wrapper:
llgtr %r2,%r2 # struct fadvise64_64_args *
- jg s390_fadvise64_64
+ jg sys32_fadvise64_64
.globl sys32_clock_settime_wrapper
sys32_clock_settime_wrapper: