author     Steven Whitehouse <swhiteho@redhat.com>   2006-07-03 10:25:08 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2006-07-03 10:25:08 -0400
commit     0a1340c185734a57fbf4775927966ad4a1347b02 (patch)
tree       d9ed8f0dd809a7c542a3356601125ea5b5aaa804 /arch/ia64/kernel
parent     af18ddb8864b096e3ed4732e2d4b21c956dcfe3a (diff)
parent     29454dde27d8e340bb1987bad9aa504af7081eba (diff)
Merge rsync://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	include/linux/kernel.h
Diffstat (limited to 'arch/ia64/kernel')
44 files changed, 370 insertions, 314 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 09a0dbc17fb..0e4553f320b 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o +obj-$(CONFIG_AUDIT) += audit.o mca_recovery-y += mca_drv.o mca_drv_asm.o # The gate DSO image is built using a special linker script. diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c index fff82929d22..2a1ef742e22 100644 --- a/arch/ia64/kernel/acpi-ext.c +++ b/arch/ia64/kernel/acpi-ext.c @@ -8,7 +8,6 @@ * published by the Free Software Foundation. */ -#include <linux/config.h> #include <linux/module.h> #include <linux/types.h> #include <linux/acpi.h> diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 58c93a30348..ccdef199d91 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -32,7 +32,6 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -68,8 +67,6 @@ EXPORT_SYMBOL(pm_power_off); unsigned char acpi_kbd_controller_present = 1; unsigned char acpi_legacy_devices; -static unsigned int __initdata acpi_madt_rev; - unsigned int acpi_cpei_override; unsigned int acpi_cpei_phys_cpuid; @@ -243,6 +240,8 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end) return iosapic_init(iosapic->address, iosapic->global_irq_base); } +static unsigned int __initdata acpi_madt_rev; + static int __init acpi_parse_plat_int_src(acpi_table_entry_header * header, const unsigned long end) @@ -415,9 +414,6 @@ static int __initdata srat_num_cpus; /* number of cpus */ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN]; #define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag)) #define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) -/* maps to convert between proximity domain and logical node ID */ -int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS]; -int __initdata nid_to_pxm_map[MAX_NUMNODES]; static struct acpi_table_slit __initdata *slit_table; static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa) @@ -533,22 +529,17 @@ void __init acpi_numa_arch_fixup(void) * MCD - This can probably be dropped now. No need for pxm ID to node ID * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES. 
*/ - /* calculate total number of nodes in system from PXM bitmap */ - memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map)); - memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map)); nodes_clear(node_online_map); for (i = 0; i < MAX_PXM_DOMAINS; i++) { if (pxm_bit_test(i)) { - int nid = num_online_nodes(); - pxm_to_nid_map[i] = nid; - nid_to_pxm_map[nid] = i; + int nid = acpi_map_pxm_to_node(i); node_set_online(nid); } } /* set logical node id in memory chunk structure */ for (i = 0; i < num_node_memblks; i++) - node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid]; + node_memblk[i].nid = pxm_to_node(node_memblk[i].nid); /* assign memory bank numbers for each chunk on each node */ for_each_online_node(i) { @@ -562,7 +553,7 @@ void __init acpi_numa_arch_fixup(void) /* set logical node id in cpu structure */ for (i = 0; i < srat_num_cpus; i++) - node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid]; + node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid); printk(KERN_INFO "Number of logical nodes in system = %d\n", num_online_nodes()); @@ -575,11 +566,11 @@ void __init acpi_numa_arch_fixup(void) for (i = 0; i < slit_table->localities; i++) { if (!pxm_bit_test(i)) continue; - node_from = pxm_to_nid_map[i]; + node_from = pxm_to_node(i); for (j = 0; j < slit_table->localities; j++) { if (!pxm_bit_test(j)) continue; - node_to = pxm_to_nid_map[j]; + node_to = pxm_to_node(j); node_distance(node_from, node_to) = slit_table->entry[i * slit_table->localities + j]; } @@ -626,7 +617,7 @@ EXPORT_SYMBOL(acpi_unregister_gsi); static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size) { struct acpi_table_header *fadt_header; - struct fadt_descriptor_rev2 *fadt; + struct fadt_descriptor *fadt; if (!phys_addr || !size) return -EINVAL; @@ -635,7 +626,7 @@ static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size) if (fadt_header->revision != 3) return -ENODEV; /* Only deal with ACPI 2.0 FADT */ - fadt = (struct fadt_descriptor_rev2 *)fadt_header; + fadt = (struct fadt_descriptor *)fadt_header; if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER)) acpi_kbd_controller_present = 0; @@ -785,9 +776,9 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid) /* * Assuming that the container driver would have set the proximity - * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag + * domain and would have initialized pxm_to_node(pxm_id) && pxm_flag */ - node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id]; + node_cpuid[cpu].nid = (pxm_id < 0) ? 
0 : pxm_to_node(pxm_id); node_cpuid[cpu].phys_id = physid; #endif @@ -966,7 +957,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret) if (pxm < 0) return AE_OK; - node = pxm_to_nid_map[pxm]; + node = pxm_to_node(pxm); if (node >= MAX_NUMNODES || !node_online(node) || cpus_empty(node_to_cpumask(node))) diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 77225659e96..75a2a2c1225 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -5,7 +5,6 @@ */ #define ASM_OFFSETS_C 1 -#include <linux/config.h> #include <linux/sched.h> @@ -217,16 +216,24 @@ void foo(void) DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, offsetof (struct ia64_mca_cpu, init_stack)); BLANK(); - DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET, - offsetof (struct ia64_sal_os_state, sal_ra)); DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET, offsetof (struct ia64_sal_os_state, os_gp)); - DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, - offsetof (struct ia64_sal_os_state, pal_min_state)); DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, offsetof (struct ia64_sal_os_state, proc_state_param)); + DEFINE(IA64_SAL_OS_STATE_SAL_RA_OFFSET, + offsetof (struct ia64_sal_os_state, sal_ra)); + DEFINE(IA64_SAL_OS_STATE_SAL_GP_OFFSET, + offsetof (struct ia64_sal_os_state, sal_gp)); + DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, + offsetof (struct ia64_sal_os_state, pal_min_state)); + DEFINE(IA64_SAL_OS_STATE_OS_STATUS_OFFSET, + offsetof (struct ia64_sal_os_state, os_status)); + DEFINE(IA64_SAL_OS_STATE_CONTEXT_OFFSET, + offsetof (struct ia64_sal_os_state, context)); DEFINE(IA64_SAL_OS_STATE_SIZE, sizeof (struct ia64_sal_os_state)); + BLANK(); + DEFINE(IA64_PMSA_GR_OFFSET, offsetof (struct pal_min_state_area_s, pmsa_gr)); DEFINE(IA64_PMSA_BANK1_GR_OFFSET, diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c new file mode 100644 index 00000000000..f2512931cca --- /dev/null +++ b/arch/ia64/kernel/audit.c @@ -0,0 +1,29 @@ +#include <linux/init.h> +#include <linux/types.h> +#include <linux/audit.h> +#include <asm/unistd.h> + +static unsigned dir_class[] = { +#include <asm-generic/audit_dir_write.h> +~0U +}; + +static unsigned chattr_class[] = { +#include <asm-generic/audit_change_attr.h> +~0U +}; + +static int __init audit_classes_init(void) +{ +#ifdef CONFIG_IA32_SUPPORT + extern __u32 ia32_dir_class[]; + extern __u32 ia32_chattr_class[]; + audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class); + audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class); +#endif + audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); + audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + return 0; +} + +__initcall(audit_classes_init); diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index 5a1bf815282..86faf221a07 100644 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c @@ -9,7 +9,6 @@ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> */ -#include <linux/config.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 12cfedce73b..b13c0555c3b 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -8,6 +8,8 @@ * Copyright (C) 1999-2003 Hewlett-Packard Co. * David Mosberger-Tang <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> + * (c) Copyright 2006 Hewlett-Packard Development Company, L.P. 
+ * Bjorn Helgaas <bjorn.helgaas@hp.com> * * All EFI Runtime Services are not implemented yet as EFI only * supports physical mode addressing on SoftSDV. This is to be fixed @@ -18,7 +20,6 @@ * Goutham Rao: <goutham.rao@intel.com> * Skip non-WB memory and ignore empty memory ranges. */ -#include <linux/config.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -622,28 +623,20 @@ efi_get_iobase (void) return 0; } -static efi_memory_desc_t * -efi_memory_descriptor (unsigned long phys_addr) +static struct kern_memdesc * +kern_memory_descriptor (unsigned long phys_addr) { - void *efi_map_start, *efi_map_end, *p; - efi_memory_desc_t *md; - u64 efi_desc_size; - - efi_map_start = __va(ia64_boot_param->efi_memmap); - efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; - efi_desc_size = ia64_boot_param->efi_memdesc_size; + struct kern_memdesc *md; - for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { - md = p; - - if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) + for (md = kern_memmap; md->start != ~0UL; md++) { + if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) return md; } return 0; } -static int -efi_memmap_has_mmio (void) +static efi_memory_desc_t * +efi_memory_descriptor (unsigned long phys_addr) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; @@ -656,8 +649,8 @@ efi_memmap_has_mmio (void) for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; - if (md->type == EFI_MEMORY_MAPPED_IO) - return 1; + if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) + return md; } return 0; } @@ -683,71 +676,125 @@ efi_mem_attributes (unsigned long phys_addr) } EXPORT_SYMBOL(efi_mem_attributes); -/* - * Determines whether the memory at phys_addr supports the desired - * attribute (WB, UC, etc). If this returns 1, the caller can safely - * access size bytes at phys_addr with the specified attribute. - */ -int -efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, u64 attr) +u64 +efi_mem_attribute (unsigned long phys_addr, unsigned long size) { unsigned long end = phys_addr + size; efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); + u64 attr; + + if (!md) + return 0; + + /* + * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells + * the kernel that firmware needs this region mapped. + */ + attr = md->attribute & ~EFI_MEMORY_RUNTIME; + do { + unsigned long md_end = efi_md_end(md); + + if (end <= md_end) + return attr; + + md = efi_memory_descriptor(md_end); + if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr) + return 0; + } while (md); + return 0; +} + +u64 +kern_mem_attribute (unsigned long phys_addr, unsigned long size) +{ + unsigned long end = phys_addr + size; + struct kern_memdesc *md; + u64 attr; /* - * Some firmware doesn't report MMIO regions in the EFI memory - * map. The Intel BigSur (a.k.a. HP i2000) has this problem. - * On those platforms, we have to assume UC is valid everywhere. + * This is a hack for ioremap calls before we set up kern_memmap. + * Maybe we should do efi_memmap_init() earlier instead. 
*/ - if (!md || (md->attribute & attr) != attr) { - if (attr == EFI_MEMORY_UC && !efi_memmap_has_mmio()) - return 1; + if (!kern_memmap) { + attr = efi_mem_attribute(phys_addr, size); + if (attr & EFI_MEMORY_WB) + return EFI_MEMORY_WB; return 0; } + md = kern_memory_descriptor(phys_addr); + if (!md) + return 0; + + attr = md->attribute; do { - unsigned long md_end = efi_md_end(md); + unsigned long md_end = kmd_end(md); if (end <= md_end) - return 1; + return attr; - md = efi_memory_descriptor(md_end); - if (!md || (md->attribute & attr) != attr) + md = kern_memory_descriptor(md_end); + if (!md || md->attribute != attr) return 0; } while (md); return 0; } +EXPORT_SYMBOL(kern_mem_attribute); -/* - * For /dev/mem, we only allow read & write system calls to access - * write-back memory, because read & write don't allow the user to - * control access size. - */ int valid_phys_addr_range (unsigned long phys_addr, unsigned long size) { - return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB); + u64 attr; + + /* + * /dev/mem reads and writes use copy_to_user(), which implicitly + * uses a granule-sized kernel identity mapping. It's really + * only safe to do this for regions in kern_memmap. For more + * details, see Documentation/ia64/aliasing.txt. + */ + attr = kern_mem_attribute(phys_addr, size); + if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC) + return 1; + return 0; } -/* - * We allow mmap of anything in the EFI memory map that supports - * either write-back or uncacheable access. For uncacheable regions, - * the supported access sizes are system-dependent, and the user is - * responsible for using the correct size. - * - * Note that this doesn't currently allow access to hot-added memory, - * because that doesn't appear in the boot-time EFI memory map. - */ int valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size) { - if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB)) - return 1; + /* + * MMIO regions are often missing from the EFI memory map. + * We must allow mmap of them for programs like X, so we + * currently can't do any useful validation. + */ + return 1; +} - if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC)) - return 1; +pgprot_t +phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, + pgprot_t vma_prot) +{ + unsigned long phys_addr = pfn << PAGE_SHIFT; + u64 attr; - return 0; + /* + * For /dev/mem mmap, we use user mappings, but if the region is + * in kern_memmap (and hence may be covered by a kernel mapping), + * we must use the same attribute as the kernel mapping. + */ + attr = kern_mem_attribute(phys_addr, size); + if (attr & EFI_MEMORY_WB) + return pgprot_cacheable(vma_prot); + else if (attr & EFI_MEMORY_UC) + return pgprot_noncached(vma_prot); + + /* + * Some chipsets don't support UC access to memory. If + * WB is supported, we prefer that. 
+ */ + if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) + return pgprot_cacheable(vma_prot); + + return pgprot_noncached(vma_prot); } int __init diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S index 5a7fe70212a..a56e161d751 100644 --- a/arch/ia64/kernel/efi_stub.S +++ b/arch/ia64/kernel/efi_stub.S @@ -61,7 +61,7 @@ GLOBAL_ENTRY(efi_call_phys) or loc3=loc3,r17 mov b6=r2 ;; - andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared + andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared br.call.sptk.many rp=ia64_switch_mode_phys .ret0: mov out4=in5 mov out0=in1 diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index bcb80ca5cf4..12701cf32d9 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -31,7 +31,6 @@ * pNonSys: !pSys */ -#include <linux/config.h> #include <asm/asmmacro.h> #include <asm/cache.h> @@ -1584,7 +1583,7 @@ sys_call_table: data8 sys_keyctl data8 sys_ioprio_set data8 sys_ioprio_get // 1275 - data8 sys_ni_syscall + data8 sys_move_pages data8 sys_inotify_init data8 sys_inotify_add_watch data8 sys_inotify_rm_watch diff --git a/arch/ia64/kernel/entry.h b/arch/ia64/kernel/entry.h index 78eeb079341..b83edac0296 100644 --- a/arch/ia64/kernel/entry.h +++ b/arch/ia64/kernel/entry.h @@ -1,4 +1,3 @@ -#include <linux/config.h> /* * Preserved registers that are shared between code in ivt.S and @@ -23,6 +22,7 @@ #define PT(f) (IA64_PT_REGS_##f##_OFFSET) #define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET) +#define SOS(f) (IA64_SAL_OS_STATE_##f##_OFFSET) #define PT_REGS_SAVES(off) \ .unwabi 3, 'i'; \ diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index 86064ca9895..3274850cf27 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S @@ -6,7 +6,6 @@ * David Mosberger-Tang <davidm@hpl.hp.com> */ -#include <linux/config.h> #include <asm/asmmacro.h> #include <asm/errno.h> diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S index 7c99e6ec3da..cc35cddfd4c 100644 --- a/arch/ia64/kernel/gate.lds.S +++ b/arch/ia64/kernel/gate.lds.S @@ -4,7 +4,6 @@ * in one page). This script controls its layout. */ -#include <linux/config.h> #include <asm/system.h> diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index f1778a84ea6..561b8f1d3bc 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -19,7 +19,6 @@ * Support for CPU Hotplug */ -#include <linux/config.h> #include <asm/asmmacro.h> #include <asm/fpu.h> diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index bbcfd08378a..b7cf651ceb1 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -5,7 +5,6 @@ * All other exports should be put directly after the definition. */ -#include <linux/config.h> #include <linux/module.h> #include <linux/string.h> diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index d58c1c5c903..9bf15fefa7e 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -79,7 +79,6 @@ * describeinterrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ * (isa_irq) is the only exception in this source code. 
*/ -#include <linux/config.h> #include <linux/acpi.h> #include <linux/init.h> @@ -456,7 +455,7 @@ iosapic_startup_edge_irq (unsigned int irq) static void iosapic_ack_edge_irq (unsigned int irq) { - irq_desc_t *idesc = irq_descp(irq); + irq_desc_t *idesc = irq_desc + irq; move_native_irq(irq); /* @@ -659,14 +658,14 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, else irq_type = &irq_type_iosapic_level; - idesc = irq_descp(vector); - if (idesc->handler != irq_type) { - if (idesc->handler != &no_irq_type) + idesc = irq_desc + vector; + if (idesc->chip != irq_type) { + if (idesc->chip != &no_irq_type) printk(KERN_WARNING "%s: changing vector %d from %s to %s\n", __FUNCTION__, vector, - idesc->handler->typename, irq_type->typename); - idesc->handler = irq_type; + idesc->chip->typename, irq_type->typename); + idesc->chip = irq_type; } return 0; } @@ -793,14 +792,14 @@ again: return -ENOSPC; } - spin_lock_irqsave(&irq_descp(vector)->lock, flags); + spin_lock_irqsave(&irq_desc[vector].lock, flags); spin_lock(&iosapic_lock); { if (gsi_to_vector(gsi) > 0) { if (list_empty(&iosapic_intr_info[vector].rtes)) free_irq_vector(vector); spin_unlock(&iosapic_lock); - spin_unlock_irqrestore(&irq_descp(vector)->lock, + spin_unlock_irqrestore(&irq_desc[vector].lock, flags); goto again; } @@ -810,7 +809,7 @@ again: polarity, trigger); if (err < 0) { spin_unlock(&iosapic_lock); - spin_unlock_irqrestore(&irq_descp(vector)->lock, + spin_unlock_irqrestore(&irq_desc[vector].lock, flags); return err; } @@ -825,7 +824,7 @@ again: set_rte(gsi, vector, dest, mask); } spin_unlock(&iosapic_lock); - spin_unlock_irqrestore(&irq_descp(vector)->lock, flags); + spin_unlock_irqrestore(&irq_desc[vector].lock, flags); printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n", gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), @@ -860,7 +859,7 @@ iosapic_unregister_intr (unsigned int gsi) } vector = irq_to_vector(irq); - idesc = irq_descp(irq); + idesc = irq_desc + irq; spin_lock_irqsave(&idesc->lock, flags); spin_lock(&iosapic_lock); { @@ -903,7 +902,7 @@ iosapic_unregister_intr (unsigned int gsi) BUG_ON(iosapic_intr_info[vector].count); /* Clear the interrupt controller descriptor */ - idesc->handler = &no_irq_type; + idesc->chip = &no_irq_type; /* Clear the interrupt information */ memset(&iosapic_intr_info[vector], 0, diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 9c72ea3f643..7852382de2f 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); } #endif - seq_printf(p, " %14s", irq_desc[i].handler->typename); + seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) @@ -100,7 +100,7 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir) cpu_set(cpu_logical_id(hwid), mask); if (irq < NR_IRQS) { - irq_affinity[irq] = mask; + irq_desc[irq].affinity = mask; irq_redir[irq] = (char) (redir & 0xff); } } @@ -120,7 +120,7 @@ static void migrate_irqs(void) int irq, new_cpu; for (irq=0; irq < NR_IRQS; irq++) { - desc = irq_descp(irq); + desc = irq_desc + irq; /* * No handling for now. 
@@ -131,7 +131,7 @@ static void migrate_irqs(void) if (desc->status == IRQ_PER_CPU) continue; - cpus_and(mask, irq_affinity[irq], cpu_online_map); + cpus_and(mask, irq_desc[irq].affinity, cpu_online_map); if (any_online_cpu(mask) == NR_CPUS) { /* * Save it for phase 2 processing @@ -144,15 +144,15 @@ static void migrate_irqs(void) /* * Al three are essential, currently WARN_ON.. maybe panic? */ - if (desc->handler && desc->handler->disable && - desc->handler->enable && desc->handler->set_affinity) { - desc->handler->disable(irq); - desc->handler->set_affinity(irq, mask); - desc->handler->enable(irq); + if (desc->chip && desc->chip->disable && + desc->chip->enable && desc->chip->set_affinity) { + desc->chip->disable(irq); + desc->chip->set_affinity(irq, mask); + desc->chip->enable(irq); } else { - WARN_ON((!(desc->handler) || !(desc->handler->disable) || - !(desc->handler->enable) || - !(desc->handler->set_affinity))); + WARN_ON((!(desc->chip) || !(desc->chip->disable) || + !(desc->chip->enable) || + !(desc->chip->set_affinity))); } } } diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 6c4d59fd036..a041367f043 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -14,7 +14,6 @@ * Added CPU Hotplug handling for IPF. */ -#include <linux/config.h> #include <linux/module.h> #include <linux/jiffies.h> @@ -46,6 +45,10 @@ #define IRQ_DEBUG 0 +/* These can be overridden in platform_irq_init */ +int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR; +int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR; + /* default base addr of IPI table */ void __iomem *ipi_base_addr = ((void __iomem *) (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR)); @@ -60,7 +63,7 @@ __u8 isa_irq_to_vector_map[16] = { }; EXPORT_SYMBOL(isa_irq_to_vector_map); -static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)]; +static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)]; int assign_irq_vector (int irq) @@ -89,6 +92,19 @@ free_irq_vector (int vector) printk(KERN_WARNING "%s: double free!\n", __FUNCTION__); } +int +reserve_irq_vector (int vector) +{ + int pos; + + if (vector < IA64_FIRST_DEVICE_VECTOR || + vector > IA64_LAST_DEVICE_VECTOR) + return -EINVAL; + + pos = vector - IA64_FIRST_DEVICE_VECTOR; + return test_and_set_bit(pos, ia64_vector_mask); +} + #ifdef CONFIG_SMP # define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE) #else @@ -219,7 +235,7 @@ extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs); static struct irqaction ipi_irqaction = { .handler = handle_IPI, - .flags = SA_INTERRUPT, + .flags = IRQF_DISABLED, .name = "IPI" }; #endif @@ -232,9 +248,9 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action) for (irq = 0; irq < NR_IRQS; ++irq) if (irq_to_vector(irq) == vec) { - desc = irq_descp(irq); + desc = irq_desc + irq; desc->status |= IRQ_PER_CPU; - desc->handler = &irq_type_ia64_lsapic; + desc->chip = &irq_type_ia64_lsapic; if (action) setup_irq(irq, action); } diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c index ea14e6a0440..1ab58b09f3d 100644 --- a/arch/ia64/kernel/irq_lsapic.c +++ b/arch/ia64/kernel/irq_lsapic.c @@ -26,6 +26,13 @@ lsapic_noop (unsigned int irq) /* nuthing to do... 
*/ } +static int lsapic_retrigger(unsigned int irq) +{ + ia64_resend_irq(irq); + + return 1; +} + struct hw_interrupt_type irq_type_ia64_lsapic = { .typename = "LSAPIC", .startup = lsapic_noop_startup, @@ -33,5 +40,6 @@ struct hw_interrupt_type irq_type_ia64_lsapic = { .enable = lsapic_noop, .disable = lsapic_noop, .ack = lsapic_noop, - .end = lsapic_noop + .end = lsapic_noop, + .retrigger = lsapic_retrigger, }; diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 829a43cab79..6b7fcbd3f6f 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -38,7 +38,6 @@ * Table is based upon EAS2.6 (Oct 1999) */ -#include <linux/config.h> #include <asm/asmmacro.h> #include <asm/break.h> diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index f9039f88d01..00d9c83b802 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -23,7 +23,6 @@ * <anil.s.keshavamurthy@intel.com> adapted from i386 */ -#include <linux/config.h> #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/string.h> diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c index 4b0b71d5aef..d4a546aa504 100644 --- a/arch/ia64/kernel/machvec.c +++ b/arch/ia64/kernel/machvec.c @@ -1,4 +1,3 @@ -#include <linux/config.h> #include <linux/module.h> #include <asm/machvec.h> diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 6a0880639bc..eb8e8dc5ac8 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -55,7 +55,6 @@ * 2005-10-07 Keith Owens <kaos@sgi.com> * Add notify_die() hooks. */ -#include <linux/config.h> #include <linux/types.h> #include <linux/init.h> #include <linux/sched.h> @@ -1458,38 +1457,38 @@ __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling); static struct irqaction cmci_irqaction = { .handler = ia64_mca_cmc_int_handler, - .flags = SA_INTERRUPT, + .flags = IRQF_DISABLED, .name = "cmc_hndlr" }; static struct irqaction cmcp_irqaction = { .handler = ia64_mca_cmc_int_caller, - .flags = SA_INTERRUPT, + .flags = IRQF_DISABLED, .name = "cmc_poll" }; static struct irqaction mca_rdzv_irqaction = { .handler = ia64_mca_rendez_int_handler, - .flags = SA_INTERRUPT, + .flags = IRQF_DISABLED, .name = "mca_rdzv" }; static struct irqaction mca_wkup_irqaction = { .handler = ia64_mca_wakeup_int_handler, - .flags = SA_INTERRUPT, + .flags = IRQF_DISABLED, .name = "mca_wkup" }; #ifdef CONFIG_ACPI static struct irqaction mca_cpe_irqaction = { .handler = ia64_mca_cpe_int_handler, - .flags = SA_INTERRUPT, + .flags = IRQF_DISABLED, .name = "cpe_hndlr" }; static struct irqaction mca_cpep_irqaction = { .handler = ia64_mca_cpe_int_caller, - .flags = SA_INTERRUPT, + .flags = IRQF_DISABLED, .name = "cpe_poll" }; #endif /* CONFIG_ACPI */ @@ -1788,7 +1787,7 @@ ia64_mca_late_init(void) cpe_poll_enabled = 0; for (irq = 0; irq < NR_IRQS; ++irq) if (irq_to_vector(irq) == cpe_vector) { - desc = irq_descp(irq); + desc = irq_desc + irq; desc->status |= IRQ_PER_CPU; setup_irq(irq, &mca_cpe_irqaction); ia64_cpe_irq = irq; diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index 6dff024cd62..96047491d1b 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S @@ -19,7 +19,6 @@ // 12/08/05 Keith Owens <kaos@sgi.com> // Use per cpu MCA/INIT stacks for all data. 
// -#include <linux/config.h> #include <linux/threads.h> #include <asm/asmmacro.h> @@ -159,7 +158,7 @@ ia64_os_mca_spin: GET_IA64_MCA_DATA(r2) // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param ;; - add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2 + add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2 ;; ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK. ;; @@ -479,9 +478,11 @@ ia64_state_save: st8 [temp2]=r11,16 // rv_rc mov r11=cr.iipa ;; - st8 [temp1]=r18,16 // proc_state_param - st8 [temp2]=r19,16 // monarch + st8 [temp1]=r18 // proc_state_param + st8 [temp2]=r19 // monarch mov r6=IA64_KR(CURRENT) + add temp1=SOS(SAL_RA), regs + add temp2=SOS(SAL_GP), regs ;; st8 [temp1]=r12,16 // sal_ra st8 [temp2]=r10,16 // sal_gp @@ -503,12 +504,14 @@ ia64_state_save: st8 [temp2]=r11,16 // cr.iipa mov r12=cr.iim ;; - st8 [temp1]=r12,16 // cr.iim + st8 [temp1]=r12 // cr.iim (p1) mov r12=IA64_MCA_COLD_BOOT (p2) mov r12=IA64_INIT_WARM_BOOT mov r6=cr.iha + add temp1=SOS(OS_STATUS), regs ;; - st8 [temp2]=r6,16 // cr.iha + st8 [temp2]=r6 // cr.iha + add temp2=SOS(CONTEXT), regs st8 [temp1]=r12 // os_status, default is cold boot mov r6=IA64_MCA_SAME_CONTEXT ;; @@ -820,8 +823,8 @@ ia64_state_restore: // Restore the SAL to OS state. The previous code left regs at pt_regs. add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs ;; - add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs - add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs + add temp1=SOS(SAL_RA), regs + add temp2=SOS(SAL_GP), regs ;; ld8 r12=[temp1],16 // sal_ra ld8 r9=[temp2],16 // sal_gp @@ -842,8 +845,10 @@ ia64_state_restore: ;; mov cr.itir=temp3 mov cr.iipa=temp4 - ld8 temp3=[temp1],16 // cr.iim - ld8 temp4=[temp2],16 // cr.iha + ld8 temp3=[temp1] // cr.iim + ld8 temp4=[temp2] // cr.iha + add temp1=SOS(OS_STATUS), regs + add temp2=SOS(CONTEXT), regs ;; mov cr.iim=temp3 mov cr.iha=temp4 @@ -916,7 +921,7 @@ ia64_state_restore: ia64_new_stack: add regs=MCA_PT_REGS_OFFSET, r3 - add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3 + add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3 mov b0=r2 // save return address GET_IA64_MCA_DATA(temp1) invala @@ -1020,7 +1025,7 @@ ia64_old_stack: ia64_set_kernel_registers: add temp3=MCA_SP_OFFSET, r3 - add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3 + add temp4=MCA_SOS_OFFSET+SOS(OS_GP), r3 mov b0=r2 // save return address GET_IA64_MCA_DATA(temp1) ;; diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index ca6666b51cc..8db6e0cedad 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -8,7 +8,6 @@ * Copyright (C) 2005 Keith Owens <kaos@sgi.com> * Copyright (C) 2006 Russ Anderson <rja@sgi.com> */ -#include <linux/config.h> #include <linux/types.h> #include <linux/init.h> #include <linux/sched.h> diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S index e6a580d354b..f2d4900751b 100644 --- a/arch/ia64/kernel/mca_drv_asm.S +++ b/arch/ia64/kernel/mca_drv_asm.S @@ -5,7 +5,6 @@ * Copyright (C) 2004 FUJITSU LIMITED * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) */ -#include <linux/config.h> #include <linux/threads.h> #include <asm/asmmacro.h> diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index 85ed54179af..c9ac8bada78 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h @@ -1,4 +1,3 @@ -#include <linux/config.h> #include <asm/cache.h> diff --git 
a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 3a30cfc9574..158e3c51bb7 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -25,7 +25,6 @@ SEGREL64LSB */ -#include <linux/config.h> #include <linux/kernel.h> #include <linux/sched.h> diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c index 0766493d4d0..1cc360c83e7 100644 --- a/arch/ia64/kernel/numa.c +++ b/arch/ia64/kernel/numa.c @@ -19,7 +19,6 @@ * Copyright (C) 2004 Silicon Graphics, Inc. * Jesse Barnes <jbarnes@sgi.com> */ -#include <linux/config.h> #include <linux/topology.h> #include <linux/module.h> #include <asm/processor.h> diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 859fb37ff49..3f5bac59209 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -17,7 +17,6 @@ * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes * 03/24/2004 Ashok Raj updated to work with CPU Hotplug */ -#include <linux/config.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> @@ -959,7 +958,7 @@ remove_palinfo_proc_entries(unsigned int hcpu) } } -static int palinfo_cpu_callback(struct notifier_block *nfb, +static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -978,7 +977,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block palinfo_cpu_notifier = +static struct notifier_block __cpuinitdata palinfo_cpu_notifier = { .notifier_call = palinfo_cpu_callback, .priority = 0, @@ -998,7 +997,7 @@ palinfo_init(void) } /* Register for future delivery via notify registration */ - register_cpu_notifier(&palinfo_cpu_notifier); + register_hotcpu_notifier(&palinfo_cpu_notifier); return 0; } diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 077f21216b6..c7ccd6ee1dd 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -19,7 +19,6 @@ * http://www.hpl.hp.com/research/linux/perfmon */ -#include <linux/config.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> @@ -532,7 +531,6 @@ static ctl_table pfm_sysctl_root[] = { static struct ctl_table_header *pfm_sysctl_header; static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); -static int pfm_flush(struct file *filp); #define pfm_get_cpu_var(v) __ia64_per_cpu_var(v) #define pfm_get_cpu_data(a,b) per_cpu(a, b) @@ -595,10 +593,11 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, } -static struct super_block * -pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) +static int +pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, + struct vfsmount *mnt) { - return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC); + return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt); } static struct file_system_type pfm_fs_type = { @@ -1773,7 +1772,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx) * When caller is self-monitoring, the context is unloaded. 
*/ static int -pfm_flush(struct file *filp) +pfm_flush(struct file *filp, fl_owner_t id) { pfm_context_t *ctx; struct task_struct *task; @@ -6165,7 +6164,7 @@ pfm_load_regs (struct task_struct *task) /* * will replay the PMU interrupt */ - if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR); + if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR); pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; } @@ -6305,7 +6304,7 @@ pfm_load_regs (struct task_struct *task) /* * will replay the PMU interrupt */ - if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR); + if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR); pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; } @@ -6440,7 +6439,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) static struct irqaction perfmon_irqaction = { .handler = pfm_interrupt_handler, - .flags = SA_INTERRUPT, + .flags = IRQF_DISABLED, .name = "perfmon" }; diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c index 344941db0a9..ff80eab83b3 100644 --- a/arch/ia64/kernel/perfmon_default_smpl.c +++ b/arch/ia64/kernel/perfmon_default_smpl.c @@ -8,7 +8,6 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> -#include <linux/config.h> #include <linux/init.h> #include <asm/delay.h> #include <linux/smp.h> diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 355d57970ba..ea914cc6812 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -9,7 +9,6 @@ * Add notify_die() hooks. */ #define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */ -#include <linux/config.h> #include <linux/cpu.h> #include <linux/pm.h> @@ -272,9 +271,9 @@ cpu_idle (void) /* endless idle loop with no priority at all */ while (1) { if (can_do_pal_halt) - clear_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status &= ~TS_POLLING; else - set_thread_flag(TIF_POLLING_NRFLAG); + current_thread_info()->status |= TS_POLLING; if (!need_resched()) { void (*idle)(void); diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index e61e15e28d8..aa705e46b97 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -6,7 +6,6 @@ * * Derived from the x86 and Alpha versions. */ -#include <linux/config.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c index 056f7a6eedc..642fdc7b969 100644 --- a/arch/ia64/kernel/sal.c +++ b/arch/ia64/kernel/sal.c @@ -6,7 +6,6 @@ * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> */ -#include <linux/config.h> #include <linux/kernel.h> #include <linux/init.h> @@ -227,7 +226,7 @@ static int sal_cache_flush_drops_interrupts; static void __init check_sal_cache_flush (void) { - unsigned long flags, itv; + unsigned long flags; int cpu; u64 vector; @@ -238,9 +237,6 @@ check_sal_cache_flush (void) * Schedule a timer interrupt, wait until it's reported, and see if * SAL_CACHE_FLUSH drops it. 
*/ - itv = ia64_get_itv(); - BUG_ON((itv & (1 << 16)) == 0); - ia64_set_itv(IA64_TIMER_VECTOR); ia64_set_itm(ia64_get_itc() + 1000); @@ -260,7 +256,6 @@ check_sal_cache_flush (void) ia64_eoi(); } - ia64_set_itv(itv); local_irq_restore(flags); put_cpu(); } diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 663a186ad19..9065f0f01ba 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = { }; #ifdef CONFIG_HOTPLUG_CPU -static int +static int __devinit salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int i, cpu = (unsigned long)hcpu; @@ -673,9 +673,7 @@ salinfo_init(void) salinfo_timer.function = &salinfo_timeout; add_timer(&salinfo_timer); -#ifdef CONFIG_HOTPLUG_CPU - register_cpu_notifier(&salinfo_cpu_notifier); -#endif + register_hotcpu_notifier(&salinfo_cpu_notifier); return 0; } diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index e4dfda1eb7d..6a33f414de5 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -22,7 +22,6 @@ * 06/24/99 W.Drummond added boot_cpu_data. * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()" */ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> @@ -260,6 +259,7 @@ reserve_memory (void) n++; num_rsvd_regions = n; + BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n); sort_regions(rsvd_region, num_rsvd_regions); } diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 1d7903ee212..77f8b49c788 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -7,7 +7,6 @@ * Derived from i386 and Alpha versions. */ -#include <linux/config.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 44e9547878a..e1960979be2 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -21,7 +21,6 @@ * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com> * Setup cpu_sibling_map and cpu_core_map */ -#include <linux/config.h> #include <linux/module.h> #include <linux/acpi.h> @@ -677,16 +676,16 @@ int migrate_platform_irqs(unsigned int cpu) new_cpei_cpu = any_online_cpu(cpu_online_map); mask = cpumask_of_cpu(new_cpei_cpu); set_cpei_target_cpu(new_cpei_cpu); - desc = irq_descp(ia64_cpe_irq); + desc = irq_desc + ia64_cpe_irq; /* * Switch for now, immediatly, we need to do fake intr * as other interrupts, but need to study CPEI behaviour with * polling before making changes. 
*/ if (desc) { - desc->handler->disable(ia64_cpe_irq); - desc->handler->set_affinity(ia64_cpe_irq, mask); - desc->handler->enable(ia64_cpe_irq); + desc->chip->disable(ia64_cpe_irq); + desc->chip->set_affinity(ia64_cpe_irq, mask); + desc->chip->enable(ia64_cpe_irq); printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu); } } diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index c7b943f1019..40722d88607 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -5,7 +5,6 @@ * Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ -#include <linux/config.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 49958904045..6928ef0d64d 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -8,7 +8,6 @@ * Copyright (C) 1999-2000 VA Linux Systems * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com> */ -#include <linux/config.h> #include <linux/cpu.h> #include <linux/init.h> @@ -232,7 +231,7 @@ ia64_init_itm (void) static struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = SA_INTERRUPT, + .flags = IRQF_DISABLED, .name = "timer" }; diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 4f3a16b37f8..b146f1cfad3 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -13,7 +13,6 @@ * Populate cpu cache entries in sysfs for cpu cache info */ -#include <linux/config.h> #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/mm.h> @@ -26,19 +25,10 @@ #include <asm/numa.h> #include <asm/cpu.h> -#ifdef CONFIG_NUMA -static struct node *sysfs_nodes; -#endif static struct ia64_cpu *sysfs_cpus; int arch_register_cpu(int num) { - struct node *parent = NULL; - -#ifdef CONFIG_NUMA - parent = &sysfs_nodes[cpu_to_node(num)]; -#endif /* CONFIG_NUMA */ - #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU) /* * If CPEI cannot be re-targetted, and this is @@ -48,21 +38,14 @@ int arch_register_cpu(int num) sysfs_cpus[num].cpu.no_control = 1; #endif - return register_cpu(&sysfs_cpus[num].cpu, num, parent); + return register_cpu(&sysfs_cpus[num].cpu, num); } #ifdef CONFIG_HOTPLUG_CPU void arch_unregister_cpu(int num) { - struct node *parent = NULL; - -#ifdef CONFIG_NUMA - int node = cpu_to_node(num); - parent = &sysfs_nodes[node]; -#endif /* CONFIG_NUMA */ - - return unregister_cpu(&sysfs_cpus[num].cpu, parent); + return unregister_cpu(&sysfs_cpus[num].cpu); } EXPORT_SYMBOL(arch_register_cpu); EXPORT_SYMBOL(arch_unregister_cpu); @@ -74,17 +57,11 @@ static int __init topology_init(void) int i, err = 0; #ifdef CONFIG_NUMA - sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL); - if (!sysfs_nodes) { - err = -ENOMEM; - goto out; - } - /* * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes? 
*/ for_each_online_node(i) { - if ((err = register_node(&sysfs_nodes[i], i, 0))) + if ((err = register_one_node(i))) goto out; } #endif @@ -166,7 +143,7 @@ static void cache_shared_cpu_map_setup( unsigned int cpu, num_shared = (int) csi.num_shared; do { - for_each_cpu(j) + for_each_possible_cpu(j) if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id && cpu_data(j)->core_id == csi.log1_cid && cpu_data(j)->thread_id == csi.log1_tid) @@ -426,7 +403,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev) * When a cpu is hot-plugged, do a check and initiate * cache kobject if necessary */ -static int cache_cpu_callback(struct notifier_block *nfb, +static int __cpuinit cache_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; @@ -444,7 +421,7 @@ static int cache_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block cache_cpu_notifier = +static struct notifier_block __cpuinitdata cache_cpu_notifier = { .notifier_call = cache_cpu_callback }; diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 7c1ddc8ac44..e7bbb0f40aa 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -7,7 +7,6 @@ * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE */ -#include <linux/config.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index fcd2bad0286..5f03b9e524d 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License @@ -29,15 +29,8 @@ #include <asm/tlbflush.h> #include <asm/sn/arch.h> -#define DEBUG 0 -#if DEBUG -#define dprintk printk -#else -#define dprintk(x...) do { } while (0) -#endif - -void __init efi_memmap_walk_uc (efi_freemem_callback_t callback); +extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *); #define MAX_UNCACHED_GRANULES 5 static int allocated_granules; @@ -60,6 +53,7 @@ static void uncached_ipi_visibility(void *data) static void uncached_ipi_mc_drain(void *data) { int status; + status = ia64_pal_mc_drain(); if (status) printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on " @@ -67,30 +61,35 @@ static void uncached_ipi_mc_drain(void *data) } -static unsigned long -uncached_get_new_chunk(struct gen_pool *poolp) +/* + * Add a new chunk of uncached memory pages to the specified pool. + * + * @pool: pool to add new chunk of uncached memory to + * @nid: node id of node to allocate memory from, or -1 + * + * This is accomplished by first allocating a granule of cached memory pages + * and then converting them to uncached memory pages. 
+ */ +static int uncached_add_chunk(struct gen_pool *pool, int nid) { struct page *page; - void *tmp; int status, i; - unsigned long addr, node; + unsigned long c_addr, uc_addr; if (allocated_granules >= MAX_UNCACHED_GRANULES) - return 0; + return -1; + + /* attempt to allocate a granule's worth of cached memory pages */ - node = poolp->private; - page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, + page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, IA64_GRANULE_SHIFT-PAGE_SHIFT); + if (!page) + return -1; - dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n", - page, (unsigned long)(page-vmem_map) << PAGE_SHIFT); + /* convert the memory pages from cached to uncached */ - /* - * Do magic if no mem on local node! XXX - */ - if (!page) - return 0; - tmp = page_address(page); + c_addr = (unsigned long)page_address(page); + uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET; /* * There's a small race here where it's possible for someone to @@ -100,76 +99,90 @@ uncached_get_new_chunk(struct gen_pool *poolp) for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++) SetPageUncached(&page[i]); - flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE); + flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE); status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); - - dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n", - status, raw_smp_processor_id()); - if (!status) { status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1); if (status) - printk(KERN_WARNING "smp_call_function failed for " - "uncached_ipi_visibility! (%i)\n", status); + goto failed; } + preempt_disable(); + if (ia64_platform_is("sn2")) - sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE); + sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE); else - flush_icache_range((unsigned long)tmp, - (unsigned long)tmp+IA64_GRANULE_SIZE); + flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE); + + /* flush the just introduced uncached translation from the TLB */ + local_flush_tlb_all(); + + preempt_enable(); ia64_pal_mc_drain(); status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1); if (status) - printk(KERN_WARNING "smp_call_function failed for " - "uncached_ipi_mc_drain! (%i)\n", status); + goto failed; - addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET; + /* + * The chunk of memory pages has been converted to uncached so now we + * can add it to the pool. + */ + status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid); + if (status) + goto failed; allocated_granules++; - return addr; + return 0; + + /* failed to convert or add the chunk so give it back to the kernel */ +failed: + for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++) + ClearPageUncached(&page[i]); + + free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT); + return -1; } /* * uncached_alloc_page * + * @starting_nid: node id of node to start with, or -1 + * * Allocate 1 uncached page. Allocates on the requested node. If no * uncached pages are available on the requested node, roundrobin starting - * with higher nodes. + * with the next higher node. 
*/ -unsigned long -uncached_alloc_page(int nid) +unsigned long uncached_alloc_page(int starting_nid) { - unsigned long maddr; + unsigned long uc_addr; + struct gen_pool *pool; + int nid; - maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE); + if (unlikely(starting_nid >= MAX_NUMNODES)) + return 0; - dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n", - maddr, nid); + if (starting_nid < 0) + starting_nid = numa_node_id(); + nid = starting_nid; - /* - * If no memory is availble on our local node, try the - * remaining nodes in the system. - */ - if (!maddr) { - int i; - - for (i = MAX_NUMNODES - 1; i >= 0; i--) { - if (i == nid || !node_online(i)) - continue; - maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE); - dprintk(KERN_DEBUG "uncached_alloc_page alternate search " - "returns %lx on node %i\n", maddr, i); - if (maddr) { - break; - } - } - } + do { + if (!node_online(nid)) + continue; + pool = uncached_pool[nid]; + if (pool == NULL) + continue; + do { + uc_addr = gen_pool_alloc(pool, PAGE_SIZE); + if (uc_addr != 0) + return uc_addr; + } while (uncached_add_chunk(pool, nid) == 0); + + } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid); - return maddr; + return 0; } EXPORT_SYMBOL(uncached_alloc_page); @@ -177,21 +190,22 @@ EXPORT_SYMBOL(uncached_alloc_page); /* * uncached_free_page * + * @uc_addr: uncached address of page to free + * * Free a single uncached page. */ -void -uncached_free_page(unsigned long maddr) +void uncached_free_page(unsigned long uc_addr) { - int node; - - node = paddr_to_nid(maddr - __IA64_UNCACHED_OFFSET); + int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET); + struct gen_pool *pool = uncached_pool[nid]; - dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node); + if (unlikely(pool == NULL)) + return; - if ((maddr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET) - panic("uncached_free_page invalid address %lx\n", maddr); + if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET) + panic("uncached_free_page invalid address %lx\n", uc_addr); - gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE); + gen_pool_free(pool, uc_addr, PAGE_SIZE); } EXPORT_SYMBOL(uncached_free_page); @@ -199,43 +213,39 @@ EXPORT_SYMBOL(uncached_free_page); /* * uncached_build_memmap, * + * @uc_start: uncached starting address of a chunk of uncached memory + * @uc_end: uncached ending address of a chunk of uncached memory + * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc()) + * * Called at boot time to build a map of pages that can be used for * memory special operations. 
*/ -static int __init -uncached_build_memmap(unsigned long start, unsigned long end, void *arg) +static int __init uncached_build_memmap(unsigned long uc_start, + unsigned long uc_end, void *arg) { - long length = end - start; - int node; - - dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end); + int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET); + struct gen_pool *pool = uncached_pool[nid]; + size_t size = uc_end - uc_start; touch_softlockup_watchdog(); - memset((char *)start, 0, length); - node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET); - - for (; start < end ; start += PAGE_SIZE) { - dprintk(KERN_INFO "sticking %lx into the pool!\n", start); - gen_pool_free(uncached_pool[node], start, PAGE_SIZE); + if (pool != NULL) { + memset((char *)uc_start, 0, size); + (void) gen_pool_add(pool, uc_start, size, nid); } - return 0; } -static int __init uncached_init(void) { - int i; +static int __init uncached_init(void) +{ + int nid; - for (i = 0; i < MAX_NUMNODES; i++) { - if (!node_online(i)) - continue; - uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT, - &uncached_get_new_chunk, i); + for_each_online_node(nid) { + uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid); } - efi_memmap_walk_uc(uncached_build_memmap); - + efi_memmap_walk_uc(uncached_build_memmap, NULL); return 0; } diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 783600fe52b..5b0d5f64a9b 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -1,4 +1,3 @@ -#include <linux/config.h> #include <asm/cache.h> #include <asm/ptrace.h> |
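
The EFI changes in this merge replace efi_mem_attribute_range() with efi_mem_attribute() and kern_mem_attribute(), both of which walk successive memory descriptors and report an attribute only when the whole requested range is covered by contiguous descriptors carrying that same attribute. The stand-alone C sketch below models that walk; struct memdesc, memmap, find_desc() and mem_attribute() are hypothetical names and test data used purely for illustration, not the kernel's own types or tables.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for an EFI memory descriptor: a physical range
   [start, start + size) carrying one attribute bitmask. */
struct memdesc {
	uint64_t start;
	uint64_t size;
	uint64_t attr;
};

/* Illustrative test data: two contiguous write-back (WB) ranges followed
   by an uncacheable (UC) range. */
static const struct memdesc memmap[] = {
	{ 0x000000, 0x80000, 0x8 /* WB */ },
	{ 0x080000, 0x80000, 0x8 /* WB */ },
	{ 0x100000, 0x10000, 0x1 /* UC */ },
};
#define NDESCS (sizeof(memmap) / sizeof(memmap[0]))

/* Find the descriptor containing phys, if any.  The unsigned subtraction
   wraps for phys < start, so one comparison covers both bounds. */
static const struct memdesc *find_desc(uint64_t phys)
{
	for (unsigned int i = 0; i < NDESCS; i++)
		if (phys - memmap[i].start < memmap[i].size)
			return &memmap[i];
	return NULL;
}

/* Return the common attribute of [phys, phys + size), or 0 if the range
   is not fully covered by contiguous descriptors sharing one attribute. */
static uint64_t mem_attribute(uint64_t phys, uint64_t size)
{
	uint64_t end = phys + size;
	const struct memdesc *md = find_desc(phys);
	uint64_t attr;

	if (!md)
		return 0;
	attr = md->attr;
	do {
		uint64_t md_end = md->start + md->size;

		if (end <= md_end)
			return attr;            /* range ends inside this descriptor */
		md = find_desc(md_end);         /* step to the adjacent descriptor */
	} while (md && md->attr == attr);
	return 0;                               /* gap or attribute change */
}

int main(void)
{
	/* Spans the two WB descriptors: reports WB (0x8). */
	printf("0x40000..+0xc0000 -> %#llx\n",
	       (unsigned long long)mem_attribute(0x40000, 0xc0000));
	/* Crosses from WB into UC: not uniform, reports 0. */
	printf("0xf0000..+0x20000 -> %#llx\n",
	       (unsigned long long)mem_attribute(0xf0000, 0x20000));
	return 0;
}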