Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c      |  13
-rw-r--r--  arch/ia64/kernel/efi.c       | 510
-rw-r--r--  arch/ia64/kernel/irq.c       |  12
-rw-r--r--  arch/ia64/kernel/mca.c       |   4
-rw-r--r--  arch/ia64/kernel/module.c    |   6
-rw-r--r--  arch/ia64/kernel/patch.c     |  16
-rw-r--r--  arch/ia64/kernel/ptrace.c    |  28
-rw-r--r--  arch/ia64/kernel/setup.c     |  63
-rw-r--r--  arch/ia64/kernel/smp.c       |  10
-rw-r--r--  arch/ia64/kernel/smpboot.c   |   6
-rw-r--r--  arch/ia64/kernel/uncached.c  |  17
11 files changed, 436 insertions(+), 249 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 7e926471e4e..9ad94ddf668 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -838,7 +838,7 @@ EXPORT_SYMBOL(acpi_unmap_lsapic);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
#ifdef CONFIG_ACPI_NUMA
-acpi_status __devinit
+static acpi_status __devinit
acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -890,7 +890,16 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
map_iosapic_to_node(gsi_base, node);
return AE_OK;
}
-#endif /* CONFIG_NUMA */
+
+static int __init
+acpi_map_iosapics (void)
+{
+ acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
+ return 0;
+}
+
+fs_initcall(acpi_map_iosapics);
+#endif /* CONFIG_ACPI_NUMA */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
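For context on the acpi.c hunk above: deferring the I/O SAPIC walk to an fs_initcall means it runs once the ACPI core is up but before ordinary device initcalls. The sketch below is illustrative only (example_visit and example_walk_namespace are made-up names, not part of the patch); acpi_get_devices() with a NULL HID visits every device handle in the namespace and calls the callback for each.

static acpi_status
example_visit (acpi_handle handle, u32 depth, void *context, void **ret)
{
	/* inspect the handle here, as acpi_map_iosapic() does for I/O SAPICs */
	return AE_OK;		/* AE_OK continues the namespace walk */
}

static int __init
example_walk_namespace (void)
{
	acpi_get_devices(NULL, example_visit, NULL, NULL);
	return 0;
}
fs_initcall(example_walk_namespace);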
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 179f230816e..f72ea6aebcb 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -239,57 +239,30 @@ is_available_memory (efi_memory_desc_t *md)
return 0;
}
-/*
- * Trim descriptor MD so its starts at address START_ADDR. If the descriptor covers
- * memory that is normally available to the kernel, issue a warning that some memory
- * is being ignored.
- */
-static void
-trim_bottom (efi_memory_desc_t *md, u64 start_addr)
-{
- u64 num_skipped_pages;
+typedef struct kern_memdesc {
+ u64 attribute;
+ u64 start;
+ u64 num_pages;
+} kern_memdesc_t;
- if (md->phys_addr >= start_addr || !md->num_pages)
- return;
-
- num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
- if (num_skipped_pages > md->num_pages)
- num_skipped_pages = md->num_pages;
-
- if (is_available_memory(md))
- printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
- "at 0x%lx\n", __FUNCTION__,
- (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
- md->phys_addr, start_addr - IA64_GRANULE_SIZE);
- /*
- * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
- * descriptor list to become unsorted. In such a case, md->num_pages will be
- * zero, so the Right Thing will happen.
- */
- md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
- md->num_pages -= num_skipped_pages;
-}
+static kern_memdesc_t *kern_memmap;
static void
-trim_top (efi_memory_desc_t *md, u64 end_addr)
+walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
- u64 num_dropped_pages, md_end_addr;
-
- md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
- if (md_end_addr <= end_addr || !md->num_pages)
- return;
+ kern_memdesc_t *k;
+ u64 start, end, voff;
- num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
- if (num_dropped_pages > md->num_pages)
- num_dropped_pages = md->num_pages;
-
- if (is_available_memory(md))
- printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
- "at 0x%lx\n", __FUNCTION__,
- (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
- md->phys_addr, end_addr);
- md->num_pages -= num_dropped_pages;
+ voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
+ for (k = kern_memmap; k->start != ~0UL; k++) {
+ if (k->attribute != attr)
+ continue;
+ start = PAGE_ALIGN(k->start);
+ end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
+ if (start < end)
+ if ((*callback)(start + voff, end + voff, arg) < 0)
+ return;
+ }
}
/*
@@ -299,148 +272,19 @@ trim_top (efi_memory_desc_t *md, u64 end_addr)
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
- int prev_valid = 0;
- struct range {
- u64 start;
- u64 end;
- } prev, curr;
- void *efi_map_start, *efi_map_end, *p, *q;
- efi_memory_desc_t *md, *check_md;
- u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
- unsigned long total_mem = 0;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-
- /* skip over non-WB memory descriptors; that's all we're interested in... */
- if (!(md->attribute & EFI_MEMORY_WB))
- continue;
-
- /*
- * granule_addr is the base of md's first granule.
- * [granule_addr - first_non_wb_addr) is guaranteed to
- * be contiguous WB memory.
- */
- granule_addr = GRANULEROUNDDOWN(md->phys_addr);
- first_non_wb_addr = max(first_non_wb_addr, granule_addr);
-
- if (first_non_wb_addr < md->phys_addr) {
- trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
- granule_addr = GRANULEROUNDDOWN(md->phys_addr);
- first_non_wb_addr = max(first_non_wb_addr, granule_addr);
- }
-
- for (q = p; q < efi_map_end; q += efi_desc_size) {
- check_md = q;
-
- if ((check_md->attribute & EFI_MEMORY_WB) &&
- (check_md->phys_addr == first_non_wb_addr))
- first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
- else
- break; /* non-WB or hole */
- }
-
- last_granule_addr = GRANULEROUNDDOWN(first_non_wb_addr);
- if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
- trim_top(md, last_granule_addr);
-
- if (is_available_memory(md)) {
- if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) >= max_addr) {
- if (md->phys_addr >= max_addr)
- continue;
- md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
- first_non_wb_addr = max_addr;
- }
-
- if (total_mem >= mem_limit)
- continue;
-
- if (total_mem + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
- unsigned long limit_addr = md->phys_addr;
-
- limit_addr += mem_limit - total_mem;
- limit_addr = GRANULEROUNDDOWN(limit_addr);
-
- if (md->phys_addr > limit_addr)
- continue;
-
- md->num_pages = (limit_addr - md->phys_addr) >>
- EFI_PAGE_SHIFT;
- first_non_wb_addr = max_addr = md->phys_addr +
- (md->num_pages << EFI_PAGE_SHIFT);
- }
- total_mem += (md->num_pages << EFI_PAGE_SHIFT);
-
- if (md->num_pages == 0)
- continue;
-
- curr.start = PAGE_OFFSET + md->phys_addr;
- curr.end = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
-
- if (!prev_valid) {
- prev = curr;
- prev_valid = 1;
- } else {
- if (curr.start < prev.start)
- printk(KERN_ERR "Oops: EFI memory table not ordered!\n");
-
- if (prev.end == curr.start) {
- /* merge two consecutive memory ranges */
- prev.end = curr.end;
- } else {
- start = PAGE_ALIGN(prev.start);
- end = prev.end & PAGE_MASK;
- if ((end > start) && (*callback)(start, end, arg) < 0)
- return;
- prev = curr;
- }
- }
- }
- }
- if (prev_valid) {
- start = PAGE_ALIGN(prev.start);
- end = prev.end & PAGE_MASK;
- if (end > start)
- (*callback)(start, end, arg);
- }
+ walk(callback, arg, EFI_MEMORY_WB);
}
/*
- * Walk the EFI memory map to pull out leftover pages in the lower
- * memory regions which do not end up in the regular memory map and
- * stick them into the uncached allocator
- *
- * The regular walk function is significantly more complex than the
- * uncached walk which means it really doesn't make sense to try and
- * marge the two.
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
+ * has memory available to the uncached allocator.
*/
-void __init
-efi_memmap_walk_uc (efi_freemem_callback_t callback)
+void
+efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size, start, end;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (md->attribute == EFI_MEMORY_UC) {
- start = PAGE_ALIGN(md->phys_addr);
- end = PAGE_ALIGN((md->phys_addr+(md->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK);
- if ((*callback)(start, end, NULL) < 0)
- return;
- }
- }
+ walk(callback, arg, EFI_MEMORY_UC);
}
-
/*
* Look for the PAL_CODE region reported by EFI and maps it using an
* ITR to enable safe PAL calls in virtual mode. See IA-64 Processor
@@ -862,3 +706,307 @@ efi_uart_console_only(void)
printk(KERN_ERR "Malformed %s value\n", name);
return 0;
}
+
+#define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
+
+static inline u64
+kmd_end(kern_memdesc_t *kmd)
+{
+ return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
+}
+
+static inline u64
+efi_md_end(efi_memory_desc_t *md)
+{
+ return (md->phys_addr + efi_md_size(md));
+}
+
+static inline int
+efi_wb(efi_memory_desc_t *md)
+{
+ return (md->attribute & EFI_MEMORY_WB);
+}
+
+static inline int
+efi_uc(efi_memory_desc_t *md)
+{
+ return (md->attribute & EFI_MEMORY_UC);
+}
+
+/*
+ * Look for the first granule-aligned memory descriptor that is big
+ * enough to hold the EFI memory map. Make sure this descriptor is at
+ * least granule-sized so it does not get trimmed.
+ */
+struct kern_memdesc *
+find_memmap_space (void)
+{
+ u64 contig_low=0, contig_high=0;
+ u64 as = 0, ae;
+ void *efi_map_start, *efi_map_end, *p, *q;
+ efi_memory_desc_t *md, *pmd = NULL, *check_md;
+ u64 space_needed, efi_desc_size;
+ unsigned long total_mem = 0;
+
+ efi_map_start = __va(ia64_boot_param->efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ /*
+ * Worst case: we need 3 kernel descriptors for each efi descriptor
+ * (if every entry has a WB part in the middle, and UC head and tail),
+ * plus one for the end marker.
+ */
+ space_needed = sizeof(kern_memdesc_t) *
+ (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
+
+ for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
+ md = p;
+ if (!efi_wb(md)) {
+ continue;
+ }
+ if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+ contig_low = GRANULEROUNDUP(md->phys_addr);
+ contig_high = efi_md_end(md);
+ for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+ check_md = q;
+ if (!efi_wb(check_md))
+ break;
+ if (contig_high != check_md->phys_addr)
+ break;
+ contig_high = efi_md_end(check_md);
+ }
+ contig_high = GRANULEROUNDDOWN(contig_high);
+ }
+ if (!is_available_memory(md) || md->type == EFI_LOADER_DATA)
+ continue;
+
+ /* Round ends inward to granule boundaries */
+ as = max(contig_low, md->phys_addr);
+ ae = min(contig_high, efi_md_end(md));
+
+ /* keep within max_addr= command line arg */
+ ae = min(ae, max_addr);
+ if (ae <= as)
+ continue;
+
+ /* avoid going over mem= command line arg */
+ if (total_mem + (ae - as) > mem_limit)
+ ae -= total_mem + (ae - as) - mem_limit;
+
+ if (ae <= as)
+ continue;
+
+ if (ae - as > space_needed)
+ break;
+ }
+ if (p >= efi_map_end)
+ panic("Can't allocate space for kernel memory descriptors");
+
+ return __va(as);
+}
+
+/*
+ * Walk the EFI memory map and gather all memory available for the kernel
+ * to use. We can allocate partial granules only if the unavailable
+ * parts exist and are WB.
+ */
+void
+efi_memmap_init(unsigned long *s, unsigned long *e)
+{
+ struct kern_memdesc *k, *prev = 0;
+ u64 contig_low=0, contig_high=0;
+ u64 as, ae, lim;
+ void *efi_map_start, *efi_map_end, *p, *q;
+ efi_memory_desc_t *md, *pmd = NULL, *check_md;
+ u64 efi_desc_size;
+ unsigned long total_mem = 0;
+
+ k = kern_memmap = find_memmap_space();
+
+ efi_map_start = __va(ia64_boot_param->efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
+ md = p;
+ if (!efi_wb(md)) {
+ if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
+ md->type == EFI_BOOT_SERVICES_DATA)) {
+ k->attribute = EFI_MEMORY_UC;
+ k->start = md->phys_addr;
+ k->num_pages = md->num_pages;
+ k++;
+ }
+ continue;
+ }
+ if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+ contig_low = GRANULEROUNDUP(md->phys_addr);
+ contig_high = efi_md_end(md);
+ for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+ check_md = q;
+ if (!efi_wb(check_md))
+ break;
+ if (contig_high != check_md->phys_addr)
+ break;
+ contig_high = efi_md_end(check_md);
+ }
+ contig_high = GRANULEROUNDDOWN(contig_high);
+ }
+ if (!is_available_memory(md))
+ continue;
+
+ /*
+ * Round ends inward to granule boundaries
+ * Give trimmings to uncached allocator
+ */
+ if (md->phys_addr < contig_low) {
+ lim = min(efi_md_end(md), contig_low);
+ if (efi_uc(md)) {
+ if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
+ kmd_end(k-1) == md->phys_addr) {
+ (k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+ } else {
+ k->attribute = EFI_MEMORY_UC;
+ k->start = md->phys_addr;
+ k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+ k++;
+ }
+ }
+ as = contig_low;
+ } else
+ as = md->phys_addr;
+
+ if (efi_md_end(md) > contig_high) {
+ lim = max(md->phys_addr, contig_high);
+ if (efi_uc(md)) {
+ if (lim == md->phys_addr && k > kern_memmap &&
+ (k-1)->attribute == EFI_MEMORY_UC &&
+ kmd_end(k-1) == md->phys_addr) {
+ (k-1)->num_pages += md->num_pages;
+ } else {
+ k->attribute = EFI_MEMORY_UC;
+ k->start = lim;
+ k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
+ k++;
+ }
+ }
+ ae = contig_high;
+ } else
+ ae = efi_md_end(md);
+
+ /* keep within max_addr= command line arg */
+ ae = min(ae, max_addr);
+ if (ae <= as)
+ continue;
+
+ /* avoid going over mem= command line arg */
+ if (total_mem + (ae - as) > mem_limit)
+ ae -= total_mem + (ae - as) - mem_limit;
+
+ if (ae <= as)
+ continue;
+ if (prev && kmd_end(prev) == md->phys_addr) {
+ prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
+ total_mem += ae - as;
+ continue;
+ }
+ k->attribute = EFI_MEMORY_WB;
+ k->start = as;
+ k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
+ total_mem += ae - as;
+ prev = k++;
+ }
+ k->start = ~0L; /* end-marker */
+
+ /* reserve the memory we are using for kern_memmap */
+ *s = (u64)kern_memmap;
+ *e = (u64)++k;
+}
+
+void
+efi_initialize_iomem_resources(struct resource *code_resource,
+ struct resource *data_resource)
+{
+ struct resource *res;
+ void *efi_map_start, *efi_map_end, *p;
+ efi_memory_desc_t *md;
+ u64 efi_desc_size;
+ char *name;
+ unsigned long flags;
+
+ efi_map_start = __va(ia64_boot_param->efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ res = NULL;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ md = p;
+
+ if (md->num_pages == 0) /* should not happen */
+ continue;
+
+ flags = IORESOURCE_MEM;
+ switch (md->type) {
+
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ continue;
+
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_CONVENTIONAL_MEMORY:
+ if (md->attribute & EFI_MEMORY_WP) {
+ name = "System ROM";
+ flags |= IORESOURCE_READONLY;
+ } else {
+ name = "System RAM";
+ }
+ break;
+
+ case EFI_ACPI_MEMORY_NVS:
+ name = "ACPI Non-volatile Storage";
+ flags |= IORESOURCE_BUSY;
+ break;
+
+ case EFI_UNUSABLE_MEMORY:
+ name = "reserved";
+ flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
+ break;
+
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ default:
+ name = "reserved";
+ flags |= IORESOURCE_BUSY;
+ break;
+ }
+
+ if ((res = kcalloc(1, sizeof(struct resource), GFP_KERNEL)) == NULL) {
+ printk(KERN_ERR "failed to allocate resource for iomem\n");
+ return;
+ }
+
+ res->name = name;
+ res->start = md->phys_addr;
+ res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+ res->flags = flags;
+
+ if (insert_resource(&iomem_resource, res) < 0)
+ kfree(res);
+ else {
+ /*
+ * We don't know which region contains
+ * kernel data so we try it repeatedly and
+ * let the resource manager test it.
+ */
+ insert_resource(res, code_resource);
+ insert_resource(res, data_resource);
+ }
+ }
+}
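A note on the new efi.c interface above: find_memmap_space() sizes its buffer for the worst case in which every EFI descriptor contributes a UC head, a WB middle, and a UC tail (three kern_memdesc_t entries each) plus one ~0UL end marker, and walk() then hands callbacks page-aligned kernel-virtual ranges -- region 7 (PAGE_OFFSET) for WB memory, region 6 (__IA64_UNCACHED_OFFSET) for UC memory. A minimal sketch of a consumer, assuming the usual efi_freemem_callback_t prototype (count_free_pages is a made-up name):

static int
count_free_pages (u64 start, u64 end, void *arg)
{
	unsigned long *npages = arg;

	*npages += (end - start) >> PAGE_SHIFT;
	return 0;			/* non-negative: keep walking */
}

/* usage:
 *	unsigned long n = 0;
 *	efi_memmap_walk(count_free_pages, &n);
 */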
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 205d9802826..d33244c3275 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -57,9 +57,9 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) {
seq_printf(p, " ");
- for (j=0; j<NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "CPU%d ",j);
+ for_each_online_cpu(j) {
+ seq_printf(p, "CPU%d ",j);
+ }
seq_putc(p, '\n');
}
@@ -72,9 +72,9 @@ int show_interrupts(struct seq_file *p, void *v)
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ for_each_online_cpu(j) {
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ }
#endif
seq_printf(p, " %14s", irq_desc[i].handler->typename);
seq_printf(p, " %s", action->name);
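The same NR_CPUS-loop-to-iterator conversion recurs in mca.c, module.c, smp.c, and smpboot.c below. A minimal sketch of the idiom (count_online is a made-up helper): for_each_online_cpu() expands to a scan of cpu_online_map, and for_each_cpu() -- as used in module.c -- to a scan of cpu_possible_map, so the explicit cpu_online()/cpu_possible() tests become unnecessary.

static int
count_online (void)
{
	int cpu, n = 0;

	for_each_online_cpu(cpu)	/* loops over cpu_online_map only */
		n++;
	return n;
}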
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d0a5106fba2..52c47da1724 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -508,9 +508,7 @@ ia64_mca_wakeup_all(void)
int cpu;
/* Clear the Rendez checkin flag for all cpus */
- for(cpu = 0; cpu < NR_CPUS; cpu++) {
- if (!cpu_online(cpu))
- continue;
+ for_each_online_cpu(cpu) {
if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
ia64_mca_wakeup(cpu);
}
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index f1aca7cffd1..7a2f0a798d1 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -947,8 +947,8 @@ void
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
unsigned int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_possible(i))
- memcpy(pcpudst + __per_cpu_offset[i], src, size);
+ for_each_cpu(i) {
+ memcpy(pcpudst + __per_cpu_offset[i], src, size);
+ }
}
#endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index 367804a605f..6a4ac7d70b3 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -64,22 +64,30 @@ ia64_patch (u64 insn_addr, u64 mask, u64 val)
void
ia64_patch_imm64 (u64 insn_addr, u64 val)
{
- ia64_patch(insn_addr,
+ /* The assembler may generate an offset pointing to either slot 1
+ or slot 2 for a long (2-slot) instruction, which occupies slots 1
+ and 2. */
+ insn_addr &= -16UL;
+ ia64_patch(insn_addr + 2,
0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
| ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */
| ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */
| ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */
| ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */));
- ia64_patch(insn_addr - 1, 0x1ffffffffffUL, val >> 22);
+ ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}
void
ia64_patch_imm60 (u64 insn_addr, u64 val)
{
- ia64_patch(insn_addr,
+ /* The assembler may generate an offset pointing to either slot 1
+ or slot 2 for a long (2-slot) instruction, which occupies slots 1
+ and 2. */
+ insn_addr &= -16UL;
+ ia64_patch(insn_addr + 2,
0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
| ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */));
- ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
+ ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18);
}
/*
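The slot arithmetic behind the patch.c change, sketched below (long_insn_patch_targets is a made-up helper): an IA-64 bundle is 16 bytes holding three instruction slots, and ia64_patch() carries the slot index in the low bits of its address argument. A long (L+X) instruction such as movl occupies slots 1 and 2, so after masking the assembler-supplied address down to the bundle base the two patch targets are base + 1 and base + 2, regardless of whether the relocation pointed at slot 1 or slot 2.

static void
long_insn_patch_targets (u64 insn_addr, u64 *slot1, u64 *slot2)
{
	u64 bundle = insn_addr & -16UL;	/* 16-byte-aligned bundle start */

	*slot1 = bundle + 1;	/* receives the upper immediate bits (val >> 22) */
	*slot2 = bundle + 2;	/* receives the bits scattered into the instruction */
}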
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index bbb8bc7c055..4b19d041063 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -587,8 +587,9 @@ thread_matches (struct task_struct *thread, unsigned long addr)
static struct task_struct *
find_thread_for_addr (struct task_struct *child, unsigned long addr)
{
- struct task_struct *g, *p;
+ struct task_struct *p;
struct mm_struct *mm;
+ struct list_head *this, *next;
int mm_users;
if (!(mm = get_task_mm(child)))
@@ -600,28 +601,21 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
goto out; /* not multi-threaded */
/*
- * First, traverse the child's thread-list. Good for scalability with
- * NPTL-threads.
+ * Traverse the current process' children list. Every task that
+ * one attaches to becomes a child. And it is only attached children
+ * of the debugger that are of interest (ptrace_check_attach checks
+ * for this).
*/
- p = child;
- do {
- if (thread_matches(p, addr)) {
- child = p;
- goto out;
- }
- if (mm_users-- <= 1)
- goto out;
- } while ((p = next_thread(p)) != child);
-
- do_each_thread(g, p) {
- if (child->mm != mm)
+ list_for_each_safe(this, next, &current->children) {
+ p = list_entry(this, struct task_struct, sibling);
+ if (p->mm != mm)
continue;
-
if (thread_matches(p, addr)) {
child = p;
goto out;
}
- } while_each_thread(g, p);
+ }
+
out:
mmput(mm);
return child;
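A brief sketch of the list idiom the rewritten find_thread_for_addr() relies on (find_child_with_mm is a made-up helper): every task is linked into its parent's ->children list through its ->sibling node, list_entry() maps the node back to the containing task_struct, and the _safe variant tolerates the current entry being unlinked during the walk.

static struct task_struct *
find_child_with_mm (struct mm_struct *mm)
{
	struct list_head *this, *next;
	struct task_struct *p;

	list_for_each_safe(this, next, &current->children) {
		p = list_entry(this, struct task_struct, sibling);
		if (p->mm == mm)
			return p;
	}
	return NULL;
}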
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1f5c26dbe70..fc56ca2da35 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -78,6 +78,19 @@ struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+extern void efi_initialize_iomem_resources(struct resource *,
+ struct resource *);
+extern char _text[], _end[], _etext[];
+
unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase; /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
@@ -171,6 +184,22 @@ sort_regions (struct rsvd_region *rsvd_region, int max)
}
}
+/*
+ * Request address space for all standard resources
+ */
+static int __init register_memory(void)
+{
+ code_resource.start = ia64_tpa(_text);
+ code_resource.end = ia64_tpa(_etext) - 1;
+ data_resource.start = ia64_tpa(_etext);
+ data_resource.end = ia64_tpa(_end) - 1;
+ efi_initialize_iomem_resources(&code_resource, &data_resource);
+
+ return 0;
+}
+
+__initcall(register_memory);
+
/**
* reserve_memory - setup reserved memory areas
*
@@ -211,6 +240,9 @@ reserve_memory (void)
}
#endif
+ efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
+ n++;
+
/* end of memory marker */
rsvd_region[n].start = ~0UL;
rsvd_region[n].end = ~0UL;
@@ -244,28 +276,31 @@ find_initrd (void)
static void __init
io_port_init (void)
{
- extern unsigned long ia64_iobase;
unsigned long phys_iobase;
/*
- * Set `iobase' to the appropriate address in region 6 (uncached access range).
+ * Set `iobase' based on the EFI memory map or, failing that, the
+ * value firmware left in ar.k0.
*
- * The EFI memory map is the "preferred" location to get the I/O port space base,
- * rather the relying on AR.KR0. This should become more clear in future SAL
- * specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
- * found in the memory map.
+ * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
+ * the port's virtual address, so ia32_load_state() loads it with a
+ * user virtual address. But in ia64 mode, glibc uses the
+ * *physical* address in ar.k0 to mmap the appropriate area from
+ * /dev/mem, and the inX()/outX() interfaces use MMIO. In both
+ * cases, user-mode can only use the legacy 0-64K I/O port space.
+ *
+ * ar.k0 is not involved in kernel I/O port accesses, which can use
+ * any of the I/O port spaces and are done via MMIO using the
+ * virtual mmio_base from the appropriate io_space[].
*/
phys_iobase = efi_get_iobase();
- if (phys_iobase)
- /* set AR.KR0 since this is all we use it for anyway */
- ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
- else {
+ if (!phys_iobase) {
phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
- printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
- "to AR.KR0\n");
- printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
+ printk(KERN_INFO "No I/O port range found in EFI memory map, "
+ "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
}
ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
+ ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
/* setup legacy IO port space */
io_space[0].mmio_base = ia64_iobase;
@@ -526,7 +561,7 @@ show_cpuinfo (struct seq_file *m, void *v)
c->itc_freq / 1000000, c->itc_freq % 1000000,
lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
- seq_printf(m, "siblings : %u\n", c->num_log);
+ seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum]));
if (c->threads_per_core > 1 || c->cores_per_socket > 1)
seq_printf(m,
"physical id: %u\n"
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 0166a984709..657ac99a451 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -185,8 +185,8 @@ send_IPI_allbutself (int op)
{
unsigned int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i) && i != smp_processor_id())
+ for_each_online_cpu(i) {
+ if (i != smp_processor_id())
send_IPI_single(i, op);
}
}
@@ -199,9 +199,9 @@ send_IPI_all (int op)
{
int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- send_IPI_single(i, op);
+ for_each_online_cpu(i) {
+ send_IPI_single(i, op);
+ }
}
/*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 7d72c0d872b..400a4898712 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -694,9 +694,9 @@ smp_cpus_done (unsigned int dummy)
* Allow the user to impress friends.
*/
- for (cpu = 0; cpu < NR_CPUS; cpu++)
- if (cpu_online(cpu))
- bogosum += cpu_data(cpu)->loops_per_jiffy;
+ for_each_online_cpu(cpu) {
+ bogosum += cpu_data(cpu)->loops_per_jiffy;
+ }
printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
(int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 4e9d06c48a8..c6d40446c2c 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -205,23 +205,18 @@ EXPORT_SYMBOL(uncached_free_page);
static int __init
uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
{
- long length;
- unsigned long vstart, vend;
+ long length = end - start;
int node;
- length = end - start;
- vstart = start + __IA64_UNCACHED_OFFSET;
- vend = end + __IA64_UNCACHED_OFFSET;
-
dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
- memset((char *)vstart, 0, length);
+ memset((char *)start, 0, length);
- node = paddr_to_nid(start);
+ node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
- for (; vstart < vend ; vstart += PAGE_SIZE) {
- dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart);
- gen_pool_free(uncached_pool[node], vstart, PAGE_SIZE);
+ for (; start < end ; start += PAGE_SIZE) {
+ dprintk(KERN_INFO "sticking %lx into the pool!\n", start);
+ gen_pool_free(uncached_pool[node], start, PAGE_SIZE);
}
return 0;
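The uncached.c simplification follows from the new walk(): EFI_MEMORY_UC callbacks now receive region-6 virtual addresses (physical + __IA64_UNCACHED_OFFSET) directly, so the manual offset arithmetic disappears and only paddr_to_nid() needs the physical address back. A minimal sketch of that convention (uc_vaddr_to_paddr is a made-up helper):

static unsigned long
uc_vaddr_to_paddr (unsigned long uc_vaddr)
{
	return uc_vaddr - __IA64_UNCACHED_OFFSET;
}

/* e.g.:	node = paddr_to_nid(uc_vaddr_to_paddr(start)); */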