Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c        53
-rw-r--r--  arch/ia64/kernel/entry.S        4
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c  15
-rw-r--r--  arch/ia64/kernel/setup.c        4
-rw-r--r--  arch/ia64/kernel/smpboot.c      5
-rw-r--r--  arch/ia64/kernel/time.c        39
-rw-r--r--  arch/ia64/kernel/traps.c        8
7 files changed, 86 insertions, 42 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d2702c419cf..ecd44bdc839 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -761,6 +761,59 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
return (0);
}
+int additional_cpus __initdata = -1;
+
+static __init int setup_additional_cpus(char *s)
+{
+ if (s)
+ additional_cpus = simple_strtol(s, NULL, 0);
+
+ return 0;
+}
+
+early_param("additional_cpus", setup_additional_cpus);
+
+/*
+ * cpu_possible_map should be static: it cannot change as CPUs are
+ * onlined or offlined.  The reason is that per-cpu data structures
+ * are allocated by some modules at init time, and they don't expect
+ * to do this dynamically on CPU arrival/departure.
+ * cpu_present_map, on the other hand, can change dynamically.
+ * When cpu_hotplug is not compiled in, we fall back to the current
+ * behaviour, which is cpu_possible == cpu_present.
+ * - Ashok Raj
+ *
+ * Three ways to find out the number of additional hotplug CPUs:
+ * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
+ * - The user can overwrite it with additional_cpus=NUM.
+ * - Otherwise, don't reserve additional CPUs.
+ */
+__init void prefill_possible_map(void)
+{
+ int i;
+ int possible, disabled_cpus;
+
+ disabled_cpus = total_cpus - available_cpus;
+
+ if (additional_cpus == -1) {
+ if (disabled_cpus > 0)
+ additional_cpus = disabled_cpus;
+ else
+ additional_cpus = 0;
+ }
+
+ possible = available_cpus + additional_cpus;
+
+ if (possible > NR_CPUS)
+ possible = NR_CPUS;
+
+ printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+ possible, max((possible - available_cpus), 0));
+
+ for (i = 0; i < possible; i++)
+ cpu_set(i, cpu_possible_map);
+}
+
int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
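
The sizing policy in the prefill_possible_map() hunk above reduces to a few lines of arithmetic: take the CPUs the firmware reports as available, add either the user-supplied additional_cpus= value or the count of BIOS-disabled CPUs, and clamp the result to NR_CPUS. A minimal standalone C sketch of that policy follows, for illustration only; the NR_CPUS value and the inputs in main() are hypothetical, not taken from the patch.

/* Standalone sketch of the CPU-count policy implemented by
 * prefill_possible_map() above -- not kernel code.  NR_CPUS and the
 * example inputs in main() are made-up illustration values. */
#include <stdio.h>

#define NR_CPUS 16      /* compile-time ceiling, as in the kernel config */

static int possible_cpus(int available_cpus, int total_cpus, int additional_cpus)
{
        int disabled_cpus = total_cpus - available_cpus;
        int possible;

        /* additional_cpus == -1 means "additional_cpus=" was not given */
        if (additional_cpus == -1)
                additional_cpus = disabled_cpus > 0 ? disabled_cpus : 0;

        possible = available_cpus + additional_cpus;
        if (possible > NR_CPUS)
                possible = NR_CPUS;
        return possible;
}

int main(void)
{
        /* 4 CPUs available, 2 disabled in ACPI, no command-line override */
        printf("%d\n", possible_cpus(4, 6, -1));        /* 6 */
        /* same firmware, but booted with additional_cpus=8 */
        printf("%d\n", possible_cpus(4, 6, 8));         /* 12 */
        return 0;
}
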
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 27b222c277e..930fdfca6dd 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -569,7 +569,9 @@ GLOBAL_ENTRY(ia64_trace_syscall)
.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-.ret3: br.cond.sptk .work_pending_syscall_end
+.ret3:
+(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
+ br.cond.sptk .work_pending_syscall_end
strace_error:
ld8 r3=[r2] // load pt_regs.r8
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index e72de580ebb..bbcfd08378a 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -10,23 +10,8 @@
#include <linux/string.h>
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strpbrk);
#include <asm/checksum.h>
EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 35f7835294a..3258e09278d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -430,6 +430,7 @@ setup_arch (char **cmdline_p)
if (early_console_setup(*cmdline_p) == 0)
mark_bsp_online();
+ parse_early_param();
#ifdef CONFIG_ACPI
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
@@ -688,6 +689,9 @@ void
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+ prefill_possible_map();
+#endif
}
/*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8f44e7d2df6..b681ef34a86 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -129,7 +129,7 @@ DEFINE_PER_CPU(int, cpu_state);
/* Bitmasks of currently online, and possible CPUs */
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
@@ -506,9 +506,6 @@ smp_build_cpu_map (void)
for (cpu = 0; cpu < NR_CPUS; cpu++) {
ia64_cpu_to_sapicid[cpu] = -1;
-#ifdef CONFIG_HOTPLUG_CPU
- cpu_set(cpu, cpu_possible_map);
-#endif
}
ia64_cpu_to_sapicid[0] = boot_cpu_id;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index a094ec49ccf..307d01e15b2 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -250,32 +250,27 @@ time_init (void)
set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}
-#define SMALLUSECS 100
-
-void
-udelay (unsigned long usecs)
+/*
+ * Generic udelay assumes that if preemption is allowed and the thread
+ * migrates to another CPU, the ITC values are synchronized across
+ * all CPUs.
+ */
+static void
+ia64_itc_udelay (unsigned long usecs)
{
- unsigned long start;
- unsigned long cycles;
- unsigned long smallusecs;
+ unsigned long start = ia64_get_itc();
+ unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;
- /*
- * Execute the non-preemptible delay loop (because the ITC might
- * not be synchronized between CPUS) in relatively short time
- * chunks, allowing preemption between the chunks.
- */
- while (usecs > 0) {
- smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs;
- preempt_disable();
- cycles = smallusecs*local_cpu_data->cyc_per_usec;
- start = ia64_get_itc();
+ while (time_before(ia64_get_itc(), end))
+ cpu_relax();
+}
- while (ia64_get_itc() - start < cycles)
- cpu_relax();
+void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
- preempt_enable();
- usecs -= smallusecs;
- }
+void
+udelay (unsigned long usecs)
+{
+ (*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);
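
The rewritten udelay() above drops the old chunked, preemption-toggling loop and instead delegates through the global ia64_udelay function pointer, preloaded with the ITC-based ia64_itc_udelay. The indirection presumably lets platform code install a different delay routine; that rationale is an inference, not stated in the hunk. Below is a minimal standalone C sketch of the same pattern, with made-up default_udelay/platform_udelay names.

/* Sketch of the replaceable-delay-routine pattern used above -- not
 * kernel code.  The default_udelay/platform_udelay names are invented
 * for illustration. */
#include <stdio.h>

static void default_udelay(unsigned long usecs)
{
        printf("default delay for %lu us (ITC-style busy wait)\n", usecs);
}

static void platform_udelay(unsigned long usecs)
{
        printf("platform-specific delay for %lu us\n", usecs);
}

/* Like ia64_udelay above: a global hook preloaded with the generic routine. */
static void (*udelay_hook)(unsigned long) = &default_udelay;

static void udelay(unsigned long usecs)
{
        (*udelay_hook)(usecs);          /* every caller goes through the hook */
}

int main(void)
{
        udelay(100);                    /* uses the ITC-style default */
        udelay_hook = &platform_udelay; /* platform setup swaps in its own routine */
        udelay(100);                    /* now uses the override */
        return 0;
}
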
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 55391901b01..dabd6c32641 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -16,6 +16,7 @@
#include <linux/module.h> /* for EXPORT_SYMBOL */
#include <linux/hardirq.h>
#include <linux/kprobes.h>
+#include <linux/delay.h> /* for ssleep() */
#include <asm/fpswa.h>
#include <asm/ia32.h>
@@ -116,6 +117,13 @@ die (const char *str, struct pt_regs *regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
spin_unlock_irq(&die.lock);
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ ssleep(5);
+ panic("Fatal exception");
+ }
+
do_exit(SIGSEGV);
}
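
The die() change above escalates a fatal oops into a full panic when panic_on_oops is set, after a five-second pause so the oops text can reach the console. A standalone C sketch of that control flow follows; panic_on_oops here is a plain variable standing in for the kernel's sysctl flag, and all helpers are made up for illustration.

/* Sketch of the die() tail added above -- not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int panic_on_oops = 1;   /* kernel: toggled via /proc/sys/kernel/panic_on_oops */

static void fatal_error(const char *str)
{
        fprintf(stderr, "Oops: %s\n", str);

        if (panic_on_oops) {
                fprintf(stderr, "Fatal exception: panic in 5 seconds\n");
                sleep(5);               /* kernel uses ssleep(5) */
                fprintf(stderr, "panic: Fatal exception\n");
                abort();                /* kernel: panic() never returns */
        }

        exit(EXIT_FAILURE);             /* kernel: do_exit(SIGSEGV) kills only the current task */
}

int main(void)
{
        fatal_error("example failure");
        return 0;                       /* never reached */
}
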