Diffstat (limited to 'include')
-rw-r--r--  include/asm-parisc/irq.h       |  5
-rw-r--r--  include/asm-parisc/smp.h       |  7
-rw-r--r--  include/asm-parisc/spinlock.h  | 19
-rw-r--r--  include/asm-parisc/tlbflush.h  | 16
-rw-r--r--  include/linux/mm.h             |  2

5 files changed, 25 insertions(+), 24 deletions(-)
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index f876bdf2205..b0a30e2c981 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -8,6 +8,7 @@
 #define _ASM_PARISC_IRQ_H
 
 #include <linux/config.h>
+#include <linux/cpumask.h>
 #include <asm/types.h>
 
 #define NO_IRQ          (-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
 extern int txn_claim_irq(int);
 extern unsigned int txn_alloc_data(unsigned int);
 extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 
 extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
-
-extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
+extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
 
 /* soft power switch support (power.c) */
 extern struct tasklet_struct power_tasklet;
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540..dbdbd2e9fdf 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
 
 #define cpu_logical_map(cpu)    (cpu)
 
 extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
 
 #endif /* !ASSEMBLY */
@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;
 
 #define raw_smp_processor_id()  (current_thread_info()->cpu)
 
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
 
 #define NO_PROC_ID      0xFF            /* No processor magic marker */
 #define ANY_PROC_ID     0xFF            /* Any processor magic marker */
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 7c3f406a746..16c2ac075fc 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -11,18 +11,25 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 	return *a == 0;
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void __raw_spin_lock(raw_spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+					 unsigned long flags)
 {
 	volatile unsigned int *a;
 
 	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
-		while (*a == 0);
+		while (*a == 0)
+			if (flags & PSW_SM_I) {
+				local_irq_enable();
+				cpu_relax();
+				local_irq_disable();
+			} else
+				cpu_relax();
 	mb();
 }
 
@@ -60,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
 
 static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter++;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter--;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 /* write_lock is less trivial.  We optimistically grab the lock and check
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index e97aa8d1eff..c9ec39c6fc6 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -12,21 +12,15 @@
  * N class systems, only one PxTLB inter processor broadcast can be
  * active at any one time on the Merced bus.  This tlb purge
  * synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. */
-#ifdef CONFIG_SMP
+ * it on all SMP systems not just the N class.  We also need to have
+ * preemption disabled on uniprocessor machines, and spin_lock does that
+ * nicely.
+ */
 extern spinlock_t pa_tlb_lock;
 
 #define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
 #define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
 
-#else
-
-#define purge_tlb_start(x) do { } while(0)
-#define purge_tlb_end(x) do { } while (0)
-
-#endif
-
-
 extern void flush_tlb_all(void);
 
 /*
@@ -88,7 +82,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
 		flush_tlb_all();
 	else {
-		preempt_disable();
 		mtsp(vma->vm_mm->context,1);
 		purge_tlb_start();
 		if (split_tlb) {
@@ -102,7 +95,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 				pdtlb(start);
 				start += PAGE_SIZE;
 			}
-			preempt_enable();
 		}
 		purge_tlb_end();
 	}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1013a42d10b..0986d19be0b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -940,7 +940,9 @@ unsigned long max_sane_readahead(unsigned long nr);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+#ifdef CONFIG_IA64
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#endif
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
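As context for the spinlock.h hunk above: __raw_spin_lock() is now a wrapper around __raw_spin_lock_flags(), which still spins on the lock word but, when the caller's saved flags have the PSW_SM_I (interrupts enabled) bit set, briefly re-enables interrupts between polls so pending interrupts can be serviced while waiting on a contended lock. Below is a minimal user-space C11 sketch of that pattern only, not the kernel's code; the demo_* names and FLAGS_IRQ_ENABLED bit are hypothetical stand-ins, and the sketch retries an atomic test-and-set directly instead of spinning on a plain load of the ldcw word the way the parisc code does.

#include <stdatomic.h>

#define FLAGS_IRQ_ENABLED 0x1u   /* stand-in for a PSW_SM_I-style flag bit */

/* Stand-ins for arch hooks; a real kernel provides its own primitives. */
static void demo_local_irq_enable(void)  { }
static void demo_local_irq_disable(void) { }
static void demo_cpu_relax(void)         { }

typedef struct { atomic_flag locked; } demo_spinlock_t;

static void demo_spin_lock_flags(demo_spinlock_t *lock, unsigned long flags)
{
        /* Try to take the lock; on contention, poll until it is released. */
        while (atomic_flag_test_and_set_explicit(&lock->locked,
                                                 memory_order_acquire)) {
                if (flags & FLAGS_IRQ_ENABLED) {
                        /* Caller had interrupts on: let them in while we wait. */
                        demo_local_irq_enable();
                        demo_cpu_relax();
                        demo_local_irq_disable();
                } else {
                        demo_cpu_relax();
                }
        }
}

static void demo_spin_unlock(demo_spinlock_t *lock)
{
        atomic_flag_clear_explicit(&lock->locked, memory_order_release);
}

int main(void)
{
        static demo_spinlock_t lock = { ATOMIC_FLAG_INIT };

        demo_spin_lock_flags(&lock, FLAGS_IRQ_ENABLED);
        /* ... critical section ... */
        demo_spin_unlock(&lock);
        return 0;
}

The point of the pattern is that interrupt latency stays bounded while a CPU waits for the lock; the lock-acquisition semantics themselves are unchanged.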
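The tlbflush.h change leans on the fact that spin_lock() disables preemption for as long as the lock is held, so mapping purge_tlb_start()/purge_tlb_end() to pa_tlb_lock on all builds also covers the "preemption disabled on uniprocessor machines" requirement the new comment mentions, which is why the explicit preempt_disable()/preempt_enable() calls could be dropped from flush_tlb_range(). The toy user-space model below illustrates that reasoning with hypothetical demo_* names; it is not kernel code.

#include <stdio.h>

/* Toy preempt count: non-zero means preemption is currently forbidden. */
static int demo_preempt_count;

static void demo_preempt_disable(void) { demo_preempt_count++; }
static void demo_preempt_enable(void)  { demo_preempt_count--; }

typedef struct { volatile int locked; } demo_tlb_spinlock_t;

/* Like the kernel's spin_lock(), disable preemption before acquiring. */
static void demo_tlb_spin_lock(demo_tlb_spinlock_t *l)
{
        demo_preempt_disable();
        while (__sync_lock_test_and_set(&l->locked, 1))
                ;       /* spin; never contended in this single-threaded demo */
}

static void demo_tlb_spin_unlock(demo_tlb_spinlock_t *l)
{
        __sync_lock_release(&l->locked);
        demo_preempt_enable();
}

static demo_tlb_spinlock_t demo_pa_tlb_lock;

/* The purge_tlb_* pair is now always lock/unlock, so the purge is
 * automatically a no-preemption region on UP as well as SMP. */
#define demo_purge_tlb_start() demo_tlb_spin_lock(&demo_pa_tlb_lock)
#define demo_purge_tlb_end()   demo_tlb_spin_unlock(&demo_pa_tlb_lock)

int main(void)
{
        demo_purge_tlb_start();
        printf("during purge: preempt_count = %d\n", demo_preempt_count);
        demo_purge_tlb_end();
        printf("after purge:  preempt_count = %d\n", demo_preempt_count);
        return 0;
}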