Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/include/asm/acpi.h        |   6
-rw-r--r--   arch/ia64/include/asm/ftrace.h      |   1
-rw-r--r--   arch/ia64/include/asm/kprobes.h     |   5
-rw-r--r--   arch/ia64/include/asm/tlb.h         |   2
-rw-r--r--   arch/ia64/include/asm/topology.h    |   4
-rw-r--r--   arch/ia64/include/asm/types.h       |   5
-rw-r--r--   arch/ia64/kernel/Makefile           |   4
-rw-r--r--   arch/ia64/kernel/acpi-processor.c   |  85
-rw-r--r--   arch/ia64/kernel/mca.c              |   5
-rw-r--r--   arch/ia64/kernel/perfmon.c          |   2
-rw-r--r--   arch/ia64/kvm/vcpu.h                |   9
-rw-r--r--   arch/ia64/kvm/vmm.c                 |   4
-rw-r--r--   arch/ia64/kvm/vtlb.c                |   2
-rw-r--r--   arch/ia64/mm/init.c                 |   2
-rw-r--r--   arch/ia64/mm/tlb.c                  |  32
15 files changed, 49 insertions, 119 deletions
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 91df9686a0d..7ae58892ba8 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -132,6 +132,12 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
 extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 #endif
 
+static inline bool arch_has_acpi_pdc(void) { return true; }
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+}
+
 #define acpi_unlazy_tlb(x)
 
 #ifdef CONFIG_ACPI_NUMA
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
index d20db3c2a65..fbd1a2470ca 100644
--- a/arch/ia64/include/asm/ftrace.h
+++ b/arch/ia64/include/asm/ftrace.h
@@ -8,7 +8,6 @@ extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0,
 		    unsigned long r0);
 #define mcount _mcount
 
-#include <asm/kprobes.h>
 /* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
 #define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
 #define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index dbf83fb28db..d5505d6f238 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -103,11 +103,6 @@ typedef struct kprobe_opcode {
 	bundle_t bundle;
 } kprobe_opcode_t;
 
-struct fnptr {
-	unsigned long ip;
-	unsigned long gp;
-};
-
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the instruction to be emulated */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 85d965cb19a..23cce999eb1 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -74,7 +74,7 @@ struct ia64_tr_entry {
 extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
 extern void ia64_ptr_entry(u64 target_mask, int slot);
 
-extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * region register macros
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3ddb4e709db..d323071d0f9 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,9 @@
 /*
  * Returns a bitmask of CPUs on Node 'node'.
  */
-#define cpumask_of_node(node) (&node_to_cpu_mask[node])
+#define cpumask_of_node(node) ((node) == -1 ?		\
+				cpu_all_mask :		\
+				&node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index bcd260e597d..93773fd37be 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -35,6 +35,11 @@ typedef unsigned int umode_t;
  */
 # ifdef __KERNEL__
 
+struct fnptr {
+	unsigned long ip;
+	unsigned long gp;
+};
+
 /* DMA addresses are 64-bits wide, in general.
  */
 typedef u64 dma_addr_t;
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 2a75e937ae8..e1236349c99 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -18,10 +18,6 @@ obj-$(CONFIG_IA64_GENERIC)	+= acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1)	+= acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
 
-ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y				+= acpi-processor.o
-endif
-
 obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
 obj-$(CONFIG_IOSAPIC)		+= iosapic.o
 obj-$(CONFIG_MODULES)		+= module.o
diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c
deleted file mode 100644
index dbda7bde611..00000000000
--- a/arch/ia64/kernel/acpi-processor.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * arch/ia64/kernel/acpi-processor.c
- *
- * Copyright (C) 2005 Intel Corporation
- *	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- *	- Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr)
-{
-	struct acpi_object_list *obj_list;
-	union acpi_object *obj;
-	u32 *buf;
-
-	/* allocate and initialize pdc. It will be used later. */
-	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-	if (!obj_list) {
-		printk(KERN_ERR "Memory allocation error\n");
-		return;
-	}
-
-	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-	if (!obj) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj_list);
-		return;
-	}
-
-	buf = kmalloc(12, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj);
-		kfree(obj_list);
-		return;
-	}
-
-	buf[0] = ACPI_PDC_REVISION_ID;
-	buf[1] = 1;
-	buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-	/*
-	 * The default of PDC_SMP_T_SWCOORD bit is set for IA64 cpu so
-	 * that OSPM is capable of native ACPI throttling software
-	 * coordination using BIOS supplied _TSD info.
-	 */
-	buf[2] |= ACPI_PDC_SMP_T_SWCOORD;
-
-	obj->type = ACPI_TYPE_BUFFER;
-	obj->buffer.length = 12;
-	obj->buffer.pointer = (u8 *) buf;
-	obj_list->count = 1;
-	obj_list->pointer = obj;
-	pr->pdc = obj_list;
-
-	return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
-	pr->pdc = NULL;
-	init_intel_pdc(pr);
-	return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
-
-void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr)
-{
-	if (pr->pdc) {
-		kfree(pr->pdc->pointer->buffer.pointer);
-		kfree(pr->pdc->pointer);
-		kfree(pr->pdc);
-		pr->pdc = NULL;
-	}
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 32f2639e9b0..378b4833024 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1225,9 +1225,12 @@ static void mca_insert_tr(u64 iord)
 	unsigned long psr;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu])
+		return;
+
 	psr = ia64_clear_ic();
 	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
-		p = &__per_cpu_idtrs[cpu][iord-1][i];
+		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
 		if (p->pte & 0x1) {
 			old_rr = ia64_get_rr(p->ifa);
 			if (old_rr != p->rr) {
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5246285a95f..6bcbe215b9a 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2293,7 +2293,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *
 	 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
 	 * 	return -ENOMEM;
 	 */
-	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
+	if (size > task_rlimit(task, RLIMIT_MEMLOCK))
 		return -ENOMEM;
 
 	/*
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 360724d3ae6..988911b4cc7 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define _vmm_raw_spin_lock(x)	 do {}while(0)
 #define _vmm_raw_spin_unlock(x) do {}while(0)
 #else
+typedef struct {
+	volatile unsigned int lock;
+} vmm_spinlock_t;
 #define _vmm_raw_spin_lock(x)					\
 	do {							\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);	\
@@ -405,12 +408,12 @@
 
 #define _vmm_raw_spin_unlock(x)				\
 	do { barrier();					\
-		((spinlock_t *)x)->raw_lock.lock = 0; }	\
+		((vmm_spinlock_t *)x)->lock = 0; }	\
 	while (0)
 #endif
 
-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
 enum { I_TLB = 1, D_TLB = 2 };
 
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index f4b4c899bb6..7a62f75778c 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
 	return ;
 }
 
-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_lock(lock);
 }
 
-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_unlock(lock);
 }
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 20b3852f7a6..4332f7ee520 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
 {
 	u64 i, dirty_pages = 1;
 	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
 	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
 
 	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b9609c69343..7c0d4814a68 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -91,7 +91,7 @@ dma_mark_clean(void *addr, size_t size)
 inline void
 ia64_set_rbs_bot (void)
 {
-	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
 
 	if (stack_size > MAX_USER_STACK_SIZE)
 		stack_size = MAX_USER_STACK_SIZE;
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ee09d261f2e..f3de9d7a98b 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -48,7 +48,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 DEFINE_PER_CPU(u8, ia64_tr_num);  /*Number of TR slots in current processor*/
 DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
 
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +429,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	struct ia64_tr_entry *p;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu]) {
+		ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+				sizeof (struct ia64_tr_entry), GFP_KERNEL);
+		if (!ia64_idtrs[cpu])
+			return -ENOMEM;
+	}
 	r = -EINVAL;
 	/*Check overlap with existing TR entries*/
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][0];
+		p = ia64_idtrs[cpu];
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -444,7 +450,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 		}
 	}
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][0];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -459,16 +465,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
 		switch (target_mask & 0x3) {
 		case 1:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 2:
-			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 3:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
-			    !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		default:
@@ -488,7 +494,7 @@ found:
 	if (target_mask & 0x1) {
 		ia64_itr(0x1, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][0][i];
+		p = ia64_idtrs[cpu] + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -497,7 +503,7 @@ found:
 	if (target_mask & 0x2) {
 		ia64_itr(0x2, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][1][i];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -528,7 +534,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 		return;
 
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][slot];
+		p = ia64_idtrs[cpu] + slot;
 		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +543,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][slot];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
 		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +552,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
-		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
-		    (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 			break;
 	}
 	per_cpu(ia64_tr_used, cpu) = i;
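
For illustration only, not part of the commit above: the mm/tlb.c hunks replace the static __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX] array with per-CPU tables allocated on first use, each laid out as two halves (instruction and data TRs) of IA64_TR_ALLOC_MAX entries, indexed as base + half * IA64_TR_ALLOC_MAX + slot. A minimal standalone sketch of that lazy-allocation layout, using hypothetical names (tr_entry, tr_slot, MAX_CPUS, SLOTS) rather than the kernel's:

#include <stdlib.h>

#define MAX_CPUS 4	/* stand-in for NR_CPUS */
#define SLOTS    8	/* stand-in for IA64_TR_ALLOC_MAX */

struct tr_entry {
	unsigned long ifa, itir, pte, rr;
};

/* One pointer per CPU; stays NULL until that CPU first inserts an entry. */
static struct tr_entry *tr_tables[MAX_CPUS];

/* Return the entry for (cpu, half, slot), where half 0 holds the
 * instruction TRs and half 1 the data TRs; the CPU's combined table
 * is allocated on first use. Returns NULL on allocation failure. */
static struct tr_entry *tr_slot(int cpu, int half, int slot)
{
	if (!tr_tables[cpu]) {
		tr_tables[cpu] = calloc(2 * SLOTS, sizeof(struct tr_entry));
		if (!tr_tables[cpu])
			return NULL;
	}
	return tr_tables[cpu] + half * SLOTS + slot;
}

int main(void)
{
	struct tr_entry *e = tr_slot(1, 1, 3);	/* data half, slot 3 */
	if (e)
		e->pte = 0x1;	/* bit 0 marks the slot in use, as in the diff */
	for (int cpu = 0; cpu < MAX_CPUS; cpu++)
		free(tr_tables[cpu]);
	return 0;
}

Memory is committed only for CPUs that actually register translation entries, which is the point of the change: the old static array paid for NR_CPUS * 2 * IA64_TR_ALLOC_MAX entries whether or not a CPU ever used one.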