Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                    |  8
-rw-r--r--  arch/ia64/configs/sn2_defconfig      |  2
-rw-r--r--  arch/ia64/defconfig                  |  2
-rw-r--r--  arch/ia64/dig/setup.c                |  2
-rw-r--r--  arch/ia64/hp/sim/simscsi.c           |  5
-rw-r--r--  arch/ia64/ia32/audit.c               | 26
-rw-r--r--  arch/ia64/kernel/Makefile            |  3
-rw-r--r--  arch/ia64/kernel/acpi.c              |  2
-rw-r--r--  arch/ia64/kernel/audit.c             | 35
-rw-r--r--  arch/ia64/kernel/efi.c               |  8
-rw-r--r--  arch/ia64/kernel/entry.S             |  4
-rw-r--r--  arch/ia64/kernel/gate.lds.S          |  1
-rw-r--r--  arch/ia64/kernel/head.S              |  7
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c        |  2
-rw-r--r--  arch/ia64/kernel/kprobes.c           |  9
-rw-r--r--  arch/ia64/kernel/pal.S               | 18
-rw-r--r--  arch/ia64/kernel/palinfo.c           | 47
-rw-r--r--  arch/ia64/kernel/perfmon.c           |  4
-rw-r--r--  arch/ia64/kernel/setup.c             |  2
-rw-r--r--  arch/ia64/kernel/sys_ia64.c          | 28
-rw-r--r--  arch/ia64/kernel/topology.c          |  8
-rw-r--r--  arch/ia64/kernel/traps.c             |  5
-rw-r--r--  arch/ia64/kernel/uncached.c          | 86
-rw-r--r--  arch/ia64/lib/Makefile               |  2
-rw-r--r--  arch/ia64/mm/contig.c                | 16
-rw-r--r--  arch/ia64/mm/discontig.c             | 68
-rw-r--r--  arch/ia64/mm/init.c                  | 55
-rw-r--r--  arch/ia64/mm/ioremap.c               |  6
-rw-r--r--  arch/ia64/pci/pci.c                  |  2
-rw-r--r--  arch/ia64/sn/kernel/setup.c          |  4
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c    |  4
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c       | 30
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c  | 24
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c    |  6
34 files changed, 315 insertions, 216 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 47de9ee6bcd..db274da7dba 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -75,6 +75,10 @@ config DMA_IS_NORMAL
depends on IA64_SGI_SN2
default y
+config AUDIT_ARCH
+ bool
+ default y
+
choice
prompt "System type"
default IA64_GENERIC
@@ -258,7 +262,7 @@ config NR_CPUS
int "Maximum number of CPUs (2-1024)"
range 2 1024
depends on SMP
- default "64"
+ default "1024"
help
You should set this to the number of CPUs in your system, but
keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but
@@ -354,7 +358,7 @@ config NUMA
config NODES_SHIFT
int "Max num nodes shift(3-10)"
range 3 10
- default "8"
+ default "10"
depends on NEED_MULTIPLE_NODES
help
This option specifies the maximum number of nodes in your SSI system.
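Note: NR_CPUS and NODES_SHIFT move to the maxima their ranges allow (1024 CPUs, 2^10 nodes), presumably to cover the largest SGI SN2/Altix configurations by default. The new always-enabled AUDIT_ARCH symbol advertises syscall-audit support and pairs with the audit classification code added below.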
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 9ea35398e10..0f14a82b856 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -363,7 +363,7 @@ CONFIG_BLK_DEV_IDECD=y
#
CONFIG_IDE_GENERIC=y
CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
+CONFIG_IDEPCI_SHARE_IRQ=y
# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_BLK_DEV_GENERIC is not set
# CONFIG_BLK_DEV_OPTI621 is not set
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 6cba55da572..9001b3fbaa3 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -366,7 +366,7 @@ CONFIG_BLK_DEV_IDESCSI=m
# CONFIG_IDE_GENERIC is not set
# CONFIG_BLK_DEV_IDEPNP is not set
CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
+CONFIG_IDEPCI_SHARE_IRQ=y
# CONFIG_BLK_DEV_OFFBOARD is not set
CONFIG_BLK_DEV_GENERIC=y
# CONFIG_BLK_DEV_OPTI621 is not set
diff --git a/arch/ia64/dig/setup.c b/arch/ia64/dig/setup.c
index 5ab12b8351d..9196b330ff7 100644
--- a/arch/ia64/dig/setup.c
+++ b/arch/ia64/dig/setup.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/kdev_t.h>
#include <linux/string.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>
#include <linux/console.h>
#include <linux/timex.h>
#include <linux/sched.h>
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3fe9753113..8f0a16a79a6 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -151,7 +151,7 @@ static void
simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
{
int list_len = sc->use_sg;
- struct scatterlist *sl = (struct scatterlist *)sc->buffer;
+ struct scatterlist *sl = (struct scatterlist *)sc->request_buffer;
struct disk_stat stat;
struct disk_req req;
@@ -244,7 +244,8 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
if (scatterlen == 0)
memcpy(sc->request_buffer, buf, len);
- else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+ else for (slp = (struct scatterlist *)sc->request_buffer;
+ scatterlen-- > 0 && len > 0; slp++) {
unsigned thislen = min(len, slp->length);
memcpy(page_address(slp->page) + slp->offset, buf, thislen);
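Note: the driver switches from scsi_cmnd->buffer to scsi_cmnd->request_buffer, most likely tracking the SCSI midlayer's removal of the ->buffer alias; historically both fields pointed at the same scatterlist.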
diff --git a/arch/ia64/ia32/audit.c b/arch/ia64/ia32/audit.c
index ab94f2e58cd..92d7d0c8d93 100644
--- a/arch/ia64/ia32/audit.c
+++ b/arch/ia64/ia32/audit.c
@@ -9,3 +9,29 @@ unsigned ia32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
+
+unsigned ia32_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned ia32_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+int ia32_classify_syscall(unsigned syscall)
+{
+ switch(syscall) {
+ case __NR_open:
+ return 2;
+ case __NR_openat:
+ return 3;
+ case __NR_socketcall:
+ return 4;
+ case __NR_execve:
+ return 5;
+ default:
+ return 1;
+ }
+}
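Note: the class tables are ~0U-terminated lists of syscall numbers, and the classifier's return codes read as: 1 = generic 32-bit/compat, 2 = open, 3 = openat, 4 = socketcall, 5 = execve. A minimal sketch of how such a terminated class list can be scanned (an illustration under that convention, not code from this patch):

	/* nonzero if nr appears in a ~0U-terminated class list */
	static int in_class(const unsigned *list, unsigned nr)
	{
		while (*list != ~0U)
			if (*list++ == nr)
				return 1;
		return 0;
	}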
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 0e4553f320b..ad8215a3c58 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -45,7 +45,8 @@ CPPFLAGS_gate.lds := -P -C -U$(ARCH)
quiet_cmd_gate = GATE $@
cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
-GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1
+GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
+ $(call ld-option, -Wl$(comma)--hash-style=sysv)
$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 99761b81db4..0176556aeec 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -55,7 +55,7 @@
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
- ((acpi_table_entry_header *)entry)->length != sizeof(*entry))
+ ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
#define PREFIX "ACPI: "
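Note: BAD_MADT_ENTRY now rejects only entries shorter than expected; firmware that reports longer (extended) MADT entries is accepted instead of being treated as malformed.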
diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c
index f2512931cca..04682555a28 100644
--- a/arch/ia64/kernel/audit.c
+++ b/arch/ia64/kernel/audit.c
@@ -8,19 +8,54 @@ static unsigned dir_class[] = {
~0U
};
+static unsigned read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
+int audit_classify_syscall(int abi, unsigned syscall)
+{
+#ifdef CONFIG_IA32_SUPPORT
+ extern int ia32_classify_syscall(unsigned);
+ if (abi == AUDIT_ARCH_I386)
+ return ia32_classify_syscall(syscall);
+#endif
+ switch(syscall) {
+ case __NR_open:
+ return 2;
+ case __NR_openat:
+ return 3;
+ case __NR_execve:
+ return 5;
+ default:
+ return 0;
+ }
+}
+
static int __init audit_classes_init(void)
{
#ifdef CONFIG_IA32_SUPPORT
extern __u32 ia32_dir_class[];
+ extern __u32 ia32_write_class[];
+ extern __u32 ia32_read_class[];
extern __u32 ia32_chattr_class[];
+ audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class);
+ audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
#endif
+ audit_register_class(AUDIT_CLASS_WRITE, write_class);
+ audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
return 0;
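Note: unlike the ia32 classifier above, the native one has no socketcall case (IA-64 provides direct socket syscalls) and its default return is 0 (native) rather than 1 (compat). The initcall then registers both the 32-bit and native read/write classes alongside the existing dir-write and chattr ones.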
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index b13c0555c3b..bb8770a177b 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -632,7 +632,7 @@ kern_memory_descriptor (unsigned long phys_addr)
if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
- return 0;
+ return NULL;
}
static efi_memory_desc_t *
@@ -652,7 +652,7 @@ efi_memory_descriptor (unsigned long phys_addr)
if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
- return 0;
+ return NULL;
}
u32
@@ -759,7 +759,7 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
}
int
-valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size)
+valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
{
/*
* MMIO regions are often missing from the EFI memory map.
@@ -923,7 +923,7 @@ find_memmap_space (void)
void
efi_memmap_init(unsigned long *s, unsigned long *e)
{
- struct kern_memdesc *k, *prev = 0;
+ struct kern_memdesc *k, *prev = NULL;
u64 contig_low=0, contig_high=0;
u64 as, ae, lim;
void *efi_map_start, *efi_map_end, *p, *q;
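Note: mostly sparse cleanups (NULL instead of 0 for pointer returns). The valid_mmap_phys_addr_range() signature change, taking a pfn rather than a physical address, matches the updated caller in arch/ia64/pci/pci.c later in this patch.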
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 12701cf32d9..fef06571be9 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1605,8 +1605,8 @@ sys_call_table:
data8 sys_ni_syscall // 1295 reserved for ppoll
data8 sys_unshare
data8 sys_splice
- data8 sys_set_robust_list
- data8 sys_get_robust_list
+ data8 sys_ni_syscall // reserved for set_robust_list
+ data8 sys_ni_syscall // reserved for get_robust_list
data8 sys_sync_file_range // 1300
data8 sys_tee
data8 sys_vmsplice
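Note: the set_robust_list/get_robust_list slots revert to sys_ni_syscall; the likely reason (an inference, not stated in the diff) is that robust futexes depend on futex_atomic_cmpxchg_inatomic(), which ia64 did not implement yet.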
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index cc35cddfd4c..6d198339bf8 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -12,6 +12,7 @@ SECTIONS
. = GATE_ADDR + SIZEOF_HEADERS;
.hash : { *(.hash) } :readable
+ .gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
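Note: newer binutils can emit a .gnu.hash section when --hash-style defaults to gnu or both; the linker script gains a slot for it, and the gate.so link (see the Makefile hunk above) forces --hash-style=sysv wherever the ld-option test says the flag exists, keeping the vDSO layout stable.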
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 561b8f1d3bc..44d540efa6d 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -197,6 +197,11 @@ start_ap:
;;
srlz.i
;;
+ {
+ flushrs // must be first insn in group
+ srlz.i
+ }
+ ;;
/*
* Save the region registers, predicate before they get clobbered
*/
@@ -853,7 +858,6 @@ END(__ia64_init_fpu)
*/
GLOBAL_ENTRY(ia64_switch_mode_phys)
{
- alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
@@ -902,7 +906,6 @@ END(ia64_switch_mode_phys)
*/
GLOBAL_ENTRY(ia64_switch_mode_virt)
{
- alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
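Note: the flushrs in start_ap spills the register stack to backing store before the region registers are saved; per the inline comment it must be the first instruction in its group, hence the explicit bundle braces. The alloc instructions removed from ia64_switch_mode_phys/virt created a zero-size frame whose saved ar.pfs in r2 was apparently never used.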
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b7cf651ceb1..3ead20fb6f4 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__umoddi3);
-#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
extern void xor_ia64_4(void);
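Note: the RAID5 config symbol was renamed when the raid4/5/6 drivers merged into MD_RAID456; this hunk and the arch/ia64/lib/Makefile hunk below just track that rename so the xor helpers keep getting built and exported.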
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 00d9c83b802..781960f80b6 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -448,11 +448,20 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
+void __kprobes flush_insn_slot(struct kprobe *p)
+{
+ unsigned long arm_addr;
+
+ arm_addr = ((unsigned long)&p->opcode.bundle) & ~0xFULL;
+ flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
unsigned long addr = (unsigned long)p->addr;
unsigned long arm_addr = addr & ~0xFULL;
+ flush_insn_slot(p);
memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}
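Note: the new flush_insn_slot() flushes the 16-byte-aligned bundle holding the saved original opcode, and arch_arm_kprobe() calls it before copying the breakpoint bundle over the original instruction, so no stale line for the slot can linger in the instruction cache across the arm step.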
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 5018c7f2e7a..ebaf1e685f5 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -217,12 +217,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
.body
;;
ld8 loc2 = [loc2] // loc2 <- entry point
- mov out0 = in0 // first argument
- mov out1 = in1 // copy arg2
- mov out2 = in2 // copy arg3
- mov out3 = in3 // copy arg3
- ;;
- mov loc3 = psr // save psr
+ mov loc3 = psr // save psr
;;
mov loc4=ar.rsc // save RSE configuration
dep.z loc2=loc2,0,61 // convert pal entry point to physical
@@ -236,18 +231,23 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
;;
andcm r16=loc3,r16 // removes bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode_phys
-.ret6:
+
+ mov out0 = in0 // first argument
+ mov out1 = in1 // copy arg2
+ mov out2 = in2 // copy arg3
+ mov out3 = in3 // copy arg4
mov loc5 = r19
mov loc6 = r20
+
br.call.sptk.many rp=b7 // now make the call
-.ret7:
+
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
mov r19=loc5
mov r20=loc6
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-.ret8: mov psr.l = loc3 // restore init PSR
+ mov psr.l = loc3 // restore init PSR
mov ar.pfs = loc1
mov rp = loc0
;;
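Note: the in0-in3 copies into the out registers move to after the br.call to ia64_switch_mode_phys, staging the PAL arguments only once the mode switch is done (the switch routine is free to use scratch state); the now-unreferenced .ret6/.ret7/.ret8 labels are dropped.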
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 3f5bac59209..0b546e2b36a 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -566,29 +566,23 @@ version_info(char *page)
pal_version_u_t min_ver, cur_ver;
char *p = page;
- /* The PAL_VERSION call is advertised as being able to support
- * both physical and virtual mode calls. This seems to be a documentation
- * bug rather than firmware bug. In fact, it does only support physical mode.
- * So now the code reflects this fact and the pal_version() has been updated
- * accordingly.
- */
- if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
+ if (ia64_pal_version(&min_ver, &cur_ver) != 0)
+ return 0;
p += sprintf(p,
"PAL_vendor : 0x%02x (min=0x%02x)\n"
- "PAL_A : %x.%x.%x (min=%x.%x.%x)\n"
- "PAL_B : %x.%x.%x (min=%x.%x.%x)\n",
- cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
-
- cur_ver.pal_version_s.pv_pal_a_model>>4,
- cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
- min_ver.pal_version_s.pv_pal_a_model>>4,
- min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
-
- cur_ver.pal_version_s.pv_pal_b_model>>4,
- cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
- min_ver.pal_version_s.pv_pal_b_model>>4,
- min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
+ "PAL_A : %02x.%02x (min=%02x.%02x)\n"
+ "PAL_B : %02x.%02x (min=%02x.%02x)\n",
+ cur_ver.pal_version_s.pv_pal_vendor,
+ min_ver.pal_version_s.pv_pal_vendor,
+ cur_ver.pal_version_s.pv_pal_a_model,
+ cur_ver.pal_version_s.pv_pal_a_rev,
+ min_ver.pal_version_s.pv_pal_a_model,
+ min_ver.pal_version_s.pv_pal_a_rev,
+ cur_ver.pal_version_s.pv_pal_b_model,
+ cur_ver.pal_version_s.pv_pal_b_rev,
+ min_ver.pal_version_s.pv_pal_b_model,
+ min_ver.pal_version_s.pv_pal_b_rev);
return p - page;
}
@@ -958,9 +952,9 @@ remove_palinfo_proc_entries(unsigned int hcpu)
}
}
-static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+#ifdef CONFIG_HOTPLUG_CPU
+static int palinfo_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
{
unsigned int hotcpu = (unsigned long)hcpu;
@@ -968,20 +962,19 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
case CPU_ONLINE:
create_palinfo_proc_entries(hotcpu);
break;
-#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
remove_palinfo_proc_entries(hotcpu);
break;
-#endif
}
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier =
{
.notifier_call = palinfo_cpu_callback,
.priority = 0,
};
+#endif
static int __init
palinfo_init(void)
@@ -1020,7 +1013,7 @@ palinfo_exit(void)
/*
* Unregister from cpu notifier callbacks
*/
- unregister_cpu_notifier(&palinfo_cpu_notifier);
+ unregister_hotcpu_notifier(&palinfo_cpu_notifier);
}
module_init(palinfo_init);
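Note: three independent cleanups here: the stale comment about PAL_VERSION only working in physical mode is dropped and the PAL_A/PAL_B revisions are printed as plain model.rev hex pairs; the CPU notifier is compiled only under CONFIG_HOTPLUG_CPU; and teardown uses unregister_hotcpu_notifier(), which compiles away when hotplug is disabled.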
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index c7ccd6ee1dd..84a7e52f56f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -4936,13 +4936,15 @@ abort_locked:
if (likely(ctx)) {
DPRINT(("context unlocked\n"));
UNPROTECT_CTX(ctx, flags);
- fput(file);
}
/* copy argument back to user, if needed */
if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
error_args:
+ if (file)
+ fput(file);
+
kfree(args_k);
DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 6a33f414de5..7ad0d9cc6db 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -35,7 +35,7 @@
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 40722d88607..9ef62a3fbfa 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -163,10 +163,25 @@ sys_pipe (void)
return retval;
}
+int ia64_mmap_check(unsigned long addr, unsigned long len,
+ unsigned long flags)
+{
+ unsigned long roff;
+
+ /*
+ * Don't permit mappings into unmapped space, the virtual page table
+ * of a region, or across a region boundary. Note: RGN_MAP_LIMIT is
+ * equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0.
+ */
+ roff = REGION_OFFSET(addr);
+ if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len)))
+ return -EINVAL;
+ return 0;
+}
+
static inline unsigned long
do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
{
- unsigned long roff;
struct file *file = NULL;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
@@ -188,17 +203,6 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
goto out;
}
- /*
- * Don't permit mappings into unmapped space, the virtual page table of a region,
- * or across a region boundary. Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
- * (for some integer n <= 61) and len > 0.
- */
- roff = REGION_OFFSET(addr);
- if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) {
- addr = -EINVAL;
- goto out;
- }
-
down_write(&current->mm->mmap_sem);
addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
up_write(&current->mm->mmap_sem);
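Note: the region-boundary validation moves out of do_mmap2() into a separate ia64_mmap_check(), so it can run for every mmap entry point rather than just this one. A plausible hook-up (an assumption; the header change is not part of this diff) is the generic arch_mmap_check() hook:

	/* include/asm-ia64/mman.h, hypothetical wiring */
	#define arch_mmap_check(addr, len, flags)	\
		ia64_mmap_check(addr, len, flags)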
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index b146f1cfad3..f648c610b10 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -67,10 +67,8 @@ static int __init topology_init(void)
#endif
sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
- if (!sysfs_cpus) {
- err = -ENOMEM;
- goto out;
- }
+ if (!sysfs_cpus)
+ panic("kzalloc in topology_init failed - NR_CPUS too big?");
for_each_present_cpu(i) {
if((err = arch_register_cpu(i)))
@@ -435,7 +433,7 @@ static int __cpuinit cache_sysfs_init(void)
(void *)(long)i);
}
- register_cpu_notifier(&cache_cpu_notifier);
+ register_hotcpu_notifier(&cache_cpu_notifier);
return 0;
}
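Note: failing to allocate the sysfs_cpus array now panics with a pointer at NR_CPUS instead of silently aborting topology_init with -ENOMEM, and the cache sysfs code registers via register_hotcpu_notifier() so the notifier disappears on !CONFIG_HOTPLUG_CPU builds.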
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e7bbb0f40aa..fffa9e0826b 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -117,11 +117,8 @@ die (const char *str, struct pt_regs *regs, long err)
die.lock_owner = -1;
spin_unlock_irq(&die.lock);
- if (panic_on_oops) {
- printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
- ssleep(5);
+ if (panic_on_oops)
panic("Fatal exception");
- }
do_exit(SIGSEGV);
}
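Note: the five-second printk-and-sleep before the panic_on_oops panic is removed; the delay appears to have moved into the generic panic path around this time, so the arch-specific copy was redundant.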
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 5f03b9e524d..4c73a676366 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -32,32 +32,38 @@
extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
-#define MAX_UNCACHED_GRANULES 5
-static int allocated_granules;
+struct uncached_pool {
+ struct gen_pool *pool;
+ struct mutex add_chunk_mutex; /* serialize adding a converted chunk */
+ int nchunks_added; /* #of converted chunks added to pool */
+ atomic_t status; /* smp called function's return status*/
+};
+
+#define MAX_CONVERTED_CHUNKS_PER_NODE 2
-struct gen_pool *uncached_pool[MAX_NUMNODES];
+struct uncached_pool uncached_pools[MAX_NUMNODES];
static void uncached_ipi_visibility(void *data)
{
int status;
+ struct uncached_pool *uc_pool = (struct uncached_pool *)data;
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
if ((status != PAL_VISIBILITY_OK) &&
(status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
- printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
- "CPU %i\n", status, raw_smp_processor_id());
+ atomic_inc(&uc_pool->status);
}
static void uncached_ipi_mc_drain(void *data)
{
int status;
+ struct uncached_pool *uc_pool = (struct uncached_pool *)data;
status = ia64_pal_mc_drain();
- if (status)
- printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
- "CPU %i\n", status, raw_smp_processor_id());
+ if (status != PAL_STATUS_SUCCESS)
+ atomic_inc(&uc_pool->status);
}
@@ -70,21 +76,34 @@ static void uncached_ipi_mc_drain(void *data)
* This is accomplished by first allocating a granule of cached memory pages
* and then converting them to uncached memory pages.
*/
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
struct page *page;
- int status, i;
+ int status, i, nchunks_added = uc_pool->nchunks_added;
unsigned long c_addr, uc_addr;
- if (allocated_granules >= MAX_UNCACHED_GRANULES)
+ if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+ return -1; /* interrupted by a signal */
+
+ if (uc_pool->nchunks_added > nchunks_added) {
+ /* someone added a new chunk while we were waiting */
+ mutex_unlock(&uc_pool->add_chunk_mutex);
+ return 0;
+ }
+
+ if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
+ }
/* attempt to allocate a granule's worth of cached memory pages */
page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
- if (!page)
+ if (!page) {
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
+ }
/* convert the memory pages from cached to uncached */
@@ -102,11 +121,14 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
- if (!status) {
- status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
- if (status)
+ if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+ atomic_set(&uc_pool->status, 0);
+ status = smp_call_function(uncached_ipi_visibility, uc_pool,
+ 0, 1);
+ if (status || atomic_read(&uc_pool->status))
goto failed;
- }
+ } else if (status != PAL_VISIBILITY_OK)
+ goto failed;
preempt_disable();
@@ -120,20 +142,24 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
preempt_enable();
- ia64_pal_mc_drain();
- status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
- if (status)
+ status = ia64_pal_mc_drain();
+ if (status != PAL_STATUS_SUCCESS)
+ goto failed;
+ atomic_set(&uc_pool->status, 0);
+ status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+ if (status || atomic_read(&uc_pool->status))
goto failed;
/*
* The chunk of memory pages has been converted to uncached so now we
* can add it to the pool.
*/
- status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+ status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
if (status)
goto failed;
- allocated_granules++;
+ uc_pool->nchunks_added++;
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return 0;
/* failed to convert or add the chunk so give it back to the kernel */
@@ -142,6 +168,7 @@ failed:
ClearPageUncached(&page[i]);
free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
}
@@ -158,7 +185,7 @@ failed:
unsigned long uncached_alloc_page(int starting_nid)
{
unsigned long uc_addr;
- struct gen_pool *pool;
+ struct uncached_pool *uc_pool;
int nid;
if (unlikely(starting_nid >= MAX_NUMNODES))
@@ -171,14 +198,14 @@ unsigned long uncached_alloc_page(int starting_nid)
do {
if (!node_online(nid))
continue;
- pool = uncached_pool[nid];
- if (pool == NULL)
+ uc_pool = &uncached_pools[nid];
+ if (uc_pool->pool == NULL)
continue;
do {
- uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+ uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
if (uc_addr != 0)
return uc_addr;
- } while (uncached_add_chunk(pool, nid) == 0);
+ } while (uncached_add_chunk(uc_pool, nid) == 0);
} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
@@ -197,7 +224,7 @@ EXPORT_SYMBOL(uncached_alloc_page);
void uncached_free_page(unsigned long uc_addr)
{
int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
- struct gen_pool *pool = uncached_pool[nid];
+ struct gen_pool *pool = uncached_pools[nid].pool;
if (unlikely(pool == NULL))
return;
@@ -224,7 +251,7 @@ static int __init uncached_build_memmap(unsigned long uc_start,
unsigned long uc_end, void *arg)
{
int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
- struct gen_pool *pool = uncached_pool[nid];
+ struct gen_pool *pool = uncached_pools[nid].pool;
size_t size = uc_end - uc_start;
touch_softlockup_watchdog();
@@ -242,7 +269,8 @@ static int __init uncached_init(void)
int nid;
for_each_online_node(nid) {
- uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+ uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+ mutex_init(&uncached_pools[nid].add_chunk_mutex);
}
efi_memmap_walk_uc(uncached_build_memmap, NULL);
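Note: the bare per-node gen_pool grows into struct uncached_pool: a mutex serializes granule conversion (replacing the racy allocated_granules counter), nchunks_added caps conversions at two granules per node, and the atomic status lets the IPI handlers report PAL errors back to uncached_add_chunk() instead of only printk'ing them. For reference, a minimal sketch of the genalloc calls this code relies on (illustration only; error handling omitted):

	#include <linux/genalloc.h>

	static void uncached_pool_example(int nid, unsigned long uc_addr)
	{
		struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, nid); /* min allocation: one page */
		unsigned long p;

		gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);  /* donate a converted granule */
		p = gen_pool_alloc(pool, PAGE_SIZE);                  /* hand out an uncached page */
		gen_pool_free(pool, p, PAGE_SIZE);                    /* and return it to the pool */
	}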
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index d8536a2c22a..38fa6e49e79 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -14,7 +14,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
-lib-$(CONFIG_MD_RAID5) += xor.o
+lib-$(CONFIG_MD_RAID456) += xor.o
AFLAGS___divdi3.o =
AFLAGS___udivdi3.o = -DUNSIGNED
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2a88cdd6d92..e004143ba86 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -27,6 +27,7 @@
#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long num_dma_physpages;
+static unsigned long max_gap;
#endif
/**
@@ -45,9 +46,15 @@ show_mem (void)
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr;
- while (i-- > 0) {
- if (!pfn_valid(i))
+ for (i = 0; i < max_mapnr; i++) {
+ if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+ if (max_gap < LARGE_GAP)
+ continue;
+ i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
continue;
+ }
total++;
if (PageReserved(mem_map+i))
reserved++;
@@ -234,7 +241,6 @@ paging_init (void)
unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long zholes_size[MAX_NR_ZONES];
- unsigned long max_gap;
#endif
/* initialize mem_map[] */
@@ -266,7 +272,6 @@ paging_init (void)
}
}
- max_gap = 0;
efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
if (max_gap < LARGE_GAP) {
vmem_map = (struct page *) 0;
@@ -277,7 +282,8 @@ paging_init (void)
/* allocate virtual_mem_map */
- map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+ map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
vmalloc_end -= map_size;
vmem_map = (struct page *) vmalloc_end;
efi_memmap_walk(create_mem_map_page_table, NULL);
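Note: show_mem() now walks pfns forward and, when the virtual mem_map is in use, jumps holes via vmemmap_find_next_valid_pfn() (max_gap becomes file-scope so the walker can see it). The vmem_map sizing is also rounded up to MAX_ORDER_NR_PAGES, which keeps the buddy allocator's assumption of a mem_map that is contiguous across a max-order block.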
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 99bd9e30db9..d260bffa01a 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
}
#endif /* CONFIG_SMP */
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
- unsigned long end_address, hole_next_pfn;
- unsigned long stop_address;
-
- end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
- end_address = PAGE_ALIGN(end_address);
-
- stop_address = (unsigned long) &vmem_map[
- pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
- do {
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = pgd_offset_k(end_address);
- if (pgd_none(*pgd)) {
- end_address += PGDIR_SIZE;
- continue;
- }
-
- pud = pud_offset(pgd, end_address);
- if (pud_none(*pud)) {
- end_address += PUD_SIZE;
- continue;
- }
-
- pmd = pmd_offset(pud, end_address);
- if (pmd_none(*pmd)) {
- end_address += PMD_SIZE;
- continue;
- }
-
- pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
- if (pte_none(*pte)) {
- end_address += PAGE_SIZE;
- pte++;
- if ((end_address < stop_address) &&
- (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
- goto retry_pte;
- continue;
- }
- /* Found next valid vmem_map page */
- break;
- } while (end_address < stop_address);
-
- end_address = min(end_address, stop_address);
- end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
- hole_next_pfn = end_address / sizeof(struct page);
- return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
- return i + 1;
-}
-#endif
-
/**
* show_mem - give short summary of memory stats
*
@@ -625,7 +563,8 @@ void show_mem(void)
if (pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(pgdat->node_start_pfn + i);
else {
- i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+ i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+ i) - 1;
continue;
}
if (PageReserved(page))
@@ -751,7 +690,8 @@ void __init paging_init(void)
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
#ifdef CONFIG_VIRTUAL_MEM_MAP
- vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+ vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
vmem_map = (struct page *) vmalloc_end;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2f50c064513..30617ccb4f7 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+ unsigned long end_address, hole_next_pfn;
+ unsigned long stop_address;
+ pg_data_t *pgdat = NODE_DATA(node);
+
+ end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+ end_address = PAGE_ALIGN(end_address);
+
+ stop_address = (unsigned long) &vmem_map[
+ pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+ do {
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_offset_k(end_address);
+ if (pgd_none(*pgd)) {
+ end_address += PGDIR_SIZE;
+ continue;
+ }
+
+ pud = pud_offset(pgd, end_address);
+ if (pud_none(*pud)) {
+ end_address += PUD_SIZE;
+ continue;
+ }
+
+ pmd = pmd_offset(pud, end_address);
+ if (pmd_none(*pmd)) {
+ end_address += PMD_SIZE;
+ continue;
+ }
+
+ pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+ if (pte_none(*pte)) {
+ end_address += PAGE_SIZE;
+ pte++;
+ if ((end_address < stop_address) &&
+ (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+ goto retry_pte;
+ continue;
+ }
+ /* Found next valid vmem_map page */
+ break;
+ } while (end_address < stop_address);
+
+ end_address = min(end_address, stop_address);
+ end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+ hole_next_pfn = end_address / sizeof(struct page);
+ return hole_next_pfn - pgdat->node_start_pfn;
+}
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
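Note: this is the former find_next_valid_pfn_for_pgdat() from discontig.c, hoisted into common code and keyed by node id so both the contiguous and discontiguous show_mem() can share it. Typical use, following the callers touched in this patch:

	/* skip over holes in the virtual mem_map */
	for (i = 0; i < max_mapnr; i++) {
		if (!pfn_valid(i)) {
			i = vmemmap_find_next_valid_pfn(nid, i) - 1;
			continue;
		}
		/* ... account for the valid page ... */
	}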
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 07bd02b6c37..4280c074d64 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -32,7 +32,7 @@ ioremap (unsigned long offset, unsigned long size)
*/
attr = kern_mem_attribute(offset, size);
if (attr & EFI_MEMORY_WB)
- return phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(offset);
else if (attr & EFI_MEMORY_UC)
return __ioremap(offset, size);
@@ -43,7 +43,7 @@ ioremap (unsigned long offset, unsigned long size)
gran_base = GRANULEROUNDDOWN(offset);
gran_size = GRANULEROUNDUP(offset + size) - gran_base;
if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
- return phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(offset);
return __ioremap(offset, size);
}
@@ -53,7 +53,7 @@ void __iomem *
ioremap_nocache (unsigned long offset, unsigned long size)
{
if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
- return 0;
+ return NULL;
return __ioremap(offset, size);
}
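Note: annotation-only fixes: ioremap() is declared to return void __iomem *, so the cached-memory fast paths cast the phys_to_virt() result accordingly, and ioremap_nocache() fails with NULL rather than 0.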
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 276512fd892..60b45e79f08 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -650,7 +650,7 @@ pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
* Avoid attribute aliasing. See Documentation/ia64/aliasing.txt
* for more details.
*/
- if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size))
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
return -EINVAL;
prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
vma->vm_page_prot);
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index dd6bcf4d58b..5f2dcba7fa8 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/kdev_t.h>
#include <linux/string.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>
#include <linux/console.h>
#include <linux/timex.h>
#include <linux/sched.h>
@@ -565,7 +565,7 @@ static void __init sn_init_pdas(char **cmdline_p)
* Also sets up a few fields in the nodepda. Also known as
* platform_cpu_init() by the ia64 machvec code.
*/
-void __init sn_cpu_init(void)
+void __cpuinit sn_cpu_init(void)
{
int cpuid;
int cpuphyid;
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index c2f69f7942a..1f3540826e6 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -279,8 +279,8 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
return part->reason;
}
- bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
- (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
+ bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
+ (BTE_NORMAL | BTE_WACQUIRE), NULL);
if (bte_ret == BTE_SUCCESS) {
return xpcSuccess;
}
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 99b123a6421..4d026f9dd98 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -480,7 +480,7 @@ xpc_activating(void *__partid)
partid_t partid = (u64) __partid;
struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags;
- struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 };
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
int ret;
@@ -1052,6 +1052,8 @@ xpc_do_exit(enum xpc_retval reason)
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
+
+ kfree(xpc_remote_copy_buffer_base);
}
@@ -1212,24 +1214,20 @@ xpc_init(void)
partid_t partid;
struct xpc_partition *part;
pid_t pid;
+ size_t buf_size;
if (!ia64_platform_is("sn2")) {
return -ENODEV;
}
- /*
- * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
- * various portions of a partition's reserved page. Its size is based
- * on the size of the reserved page header and part_nasids mask. So we
- * need to ensure that the other items will fit as well.
- */
- if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) {
- dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
- return -EPERM;
- }
- DBUG_ON((u64) xpc_remote_copy_buffer !=
- L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
+
+ buf_size = max(XPC_RP_VARS_SIZE,
+ XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
+ xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
+ GFP_KERNEL, &xpc_remote_copy_buffer_base);
+ if (xpc_remote_copy_buffer == NULL)
+ return -ENOMEM;
snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
@@ -1293,6 +1291,8 @@ xpc_init(void)
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
+
+ kfree(xpc_remote_copy_buffer_base);
return -EBUSY;
}
@@ -1311,6 +1311,8 @@ xpc_init(void)
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
+
+ kfree(xpc_remote_copy_buffer_base);
return -EBUSY;
}
@@ -1362,6 +1364,8 @@ xpc_init(void)
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
+
+ kfree(xpc_remote_copy_buffer_base);
return -EBUSY;
}
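Note: the statically sized copy buffer becomes a runtime allocation of max(XPC_RP_VARS_SIZE, XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES), which also retires the old build-time size check. The helper hands back a cacheline-aligned pointer while storing the raw kmalloc pointer through its last argument, and that raw pointer is what each error path and xpc_do_exit() must free:

	/* pattern used above: *base keeps the pointer that kfree() needs */
	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
				GFP_KERNEL, &xpc_remote_copy_buffer_base);
	/* ... on any failure path and at exit: */
	kfree(xpc_remote_copy_buffer_base);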
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 2a89cfce495..57c723f5cba 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -71,19 +71,15 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
* Generic buffer used to store a local copy of portions of a remote
* partition's reserved page (either its header and part_nasids mask,
* or its vars).
- *
- * xpc_discovery runs only once and is a seperate thread that is
- * very likely going to be processing in parallel with receiving
- * interrupts.
*/
-char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
- XP_NASID_MASK_BYTES];
+char *xpc_remote_copy_buffer;
+void *xpc_remote_copy_buffer_base;
/*
* Guarantee that the kmalloc'd memory is cacheline aligned.
*/
-static void *
+void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
/* see if kmalloc will give us cachline aligned memory by default */
@@ -148,7 +144,7 @@ xpc_get_rsvd_page_pa(int nasid)
}
}
- bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len,
+ bte_res = xp_bte_copy(rp_pa, buf, buf_len,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bte_res != BTE_SUCCESS) {
dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
@@ -447,7 +443,7 @@ xpc_check_remote_hb(void)
/* pull the remote_hb cache line */
bres = xp_bte_copy(part->remote_vars_pa,
- ia64_tpa((u64) remote_vars),
+ (u64) remote_vars,
XPC_RP_VARS_SIZE,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) {
@@ -498,8 +494,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
/* pull over the reserved page header and part_nasids mask */
-
- bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
+ bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) {
@@ -554,11 +549,8 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
return xpcVarsNotSet;
}
-
/* pull over the cross partition variables */
-
- bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
- XPC_RP_VARS_SIZE,
+ bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) {
return xpc_map_bte_errors(bres);
@@ -1239,7 +1231,7 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
- bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
+ bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
return xpc_map_bte_errors(bte_res);
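Note: all xp_bte_copy() callers stop wrapping the destination in ia64_tpa(); passing the virtual address implies the translation now happens inside xp_bte_copy() itself (that helper is outside this diff). xpc_kmalloc_cacheline_aligned() also loses its static qualifier, and the old fixed-size xpc_remote_copy_buffer gives way to the pointer pair that xpc_main.c now allocates.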
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 17cd3428488..af7171adcd2 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -74,7 +74,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr)
else
mmr_war_offset = 0x158;
- readq_relaxed((void *)(mmr_base + mmr_war_offset));
+ readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
}
}
@@ -92,8 +92,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
if (mmr_offset < 0x45000) {
if (mmr_offset == 0x100)
- readq_relaxed((void *)(mmr_base + 0x38));
- readq_relaxed((void *)(mmr_base + 0xb050));
+ readq_relaxed((void __iomem *)(mmr_base + 0x38));
+ readq_relaxed((void __iomem *)(mmr_base + 0xb050));
}
}