Diffstat (limited to 'include')
149 files changed, 2439 insertions, 914 deletions
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h index dba70c62a16..457c34b6eb0 100644 --- a/include/asm-alpha/core_t2.h +++ b/include/asm-alpha/core_t2.h @@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr) set_hae(msb); \ } -static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(t2_hae_lock); __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) { diff --git a/include/asm-arm/assembler.h b/include/asm-arm/assembler.h index d53bafa9bf1..fce83282082 100644 --- a/include/asm-arm/assembler.h +++ b/include/asm-arm/assembler.h @@ -55,30 +55,6 @@ #define PLD(code...) #endif -#define MODE_USR USR_MODE -#define MODE_FIQ FIQ_MODE -#define MODE_IRQ IRQ_MODE -#define MODE_SVC SVC_MODE - -#define DEFAULT_FIQ MODE_FIQ - -/* - * LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc}) - */ -#ifdef __STDC__ -#define LOADREGS(cond, base, reglist...)\ - ldm##cond base,reglist -#else -#define LOADREGS(cond, base, reglist...)\ - ldm/**/cond base,reglist -#endif - -/* - * Build a return instruction for this processor type. - */ -#define RETINSTR(instr, regs...)\ - instr regs - /* * Enable and disable interrupts */ @@ -117,18 +93,6 @@ msr cpsr_c, \oldcpsr .endm -/* - * These two are used to save LR/restore PC over a user-based access. - * The old 26-bit architecture requires that we do. On 32-bit - * architecture, we can safely ignore this requirement. - */ - .macro save_lr - .endm - - .macro restore_pc - mov pc, lr - .endm - #define USER(x...) \ 9999: x; \ .section __ex_table,"a"; \ diff --git a/include/asm-arm/hardware/locomo.h b/include/asm-arm/hardware/locomo.h index 5f10048ec54..22dfb173776 100644 --- a/include/asm-arm/hardware/locomo.h +++ b/include/asm-arm/hardware/locomo.h @@ -111,6 +111,8 @@ #define LOCOMO_ALS 0x00 /* Adjust light cycle */ #define LOCOMO_ALD 0x04 /* Adjust light duty */ +#define LOCOMO_ALC_EN 0x8000 + /* Backlight controller: TFT signal */ #define LOCOMO_BACKLIGHT 0x38 #define LOCOMO_TC 0x00 /* TFT control signal */ @@ -203,4 +205,7 @@ void locomo_gpio_write(struct locomo_dev *ldev, unsigned int bits, unsigned int /* M62332 control function */ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel); +/* Frontlight control */ +void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf); + #endif diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 845cb67ad8e..8ceab7bcd8b 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -51,4 +51,10 @@ __ret; \ }) +#ifdef CONFIG_SMP +# define WARN_ON_SMP(x) WARN_ON(x) +#else +# define WARN_ON_SMP(x) do { } while (0) +#endif + #endif diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h index d79e9ee10fd..c61bd1a17f3 100644 --- a/include/asm-i386/alternative.h +++ b/include/asm-i386/alternative.h @@ -5,6 +5,8 @@ #include <asm/types.h> +#include <linux/types.h> + struct alt_instr { u8 *instr; /* original instruction */ u8 *replacement; diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h index 1d8362cb2c5..2c1e371cebb 100644 --- a/include/asm-i386/apic.h +++ b/include/asm-i386/apic.h @@ -111,24 +111,12 @@ extern void init_apic_mappings (void); extern void smp_local_timer_interrupt (struct pt_regs * regs); extern void setup_boot_APIC_clock (void); extern void setup_secondary_APIC_clock (void); -extern void setup_apic_nmi_watchdog (void); -extern int reserve_lapic_nmi(void); -extern void release_lapic_nmi(void); -extern void 
disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern void nmi_watchdog_tick (struct pt_regs * regs); extern int APIC_init_uniprocessor (void); extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void enable_NMI_through_LVT0 (void * dummy); -extern unsigned int nmi_watchdog; -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 -#define NMI_INVALID 3 - extern int disable_timer_pin_1; void smp_send_timer_broadcast_ipi(struct pt_regs *regs); diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h index e7252c216ca..b1bc7b1b64b 100644 --- a/include/asm-i386/cpu.h +++ b/include/asm-i386/cpu.h @@ -7,8 +7,6 @@ #include <linux/nodemask.h> #include <linux/percpu.h> -#include <asm/node.h> - struct i386_cpu { struct cpu cpu; }; diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h index 3ecedbafa8c..d314ebb3d59 100644 --- a/include/asm-i386/cpufeature.h +++ b/include/asm-i386/cpufeature.h @@ -72,6 +72,7 @@ #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ +#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h index 456db8501c0..b1c7650dc7b 100644 --- a/include/asm-i386/delay.h +++ b/include/asm-i386/delay.h @@ -23,4 +23,6 @@ extern void __delay(unsigned long loops); ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ __ndelay(n)) +void use_tsc_delay(void); + #endif /* defined(_I386_DELAY_H) */ diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h new file mode 100644 index 00000000000..2280f6272f8 --- /dev/null +++ b/include/asm-i386/dwarf2.h @@ -0,0 +1,54 @@ +#ifndef _DWARF2_H +#define _DWARF2_H + +#include <linux/config.h> + +#ifndef __ASSEMBLY__ +#warning "asm/dwarf2.h should be only included in pure assembly files" +#endif + +/* + Macros for dwarf2 CFI unwind table entries. + See "as.info" for details on these pseudo ops. Unfortunately + they are only supported in very new binutils, so define them + away for older version. + */ + +#ifdef CONFIG_UNWIND_INFO + +#define CFI_STARTPROC .cfi_startproc +#define CFI_ENDPROC .cfi_endproc +#define CFI_DEF_CFA .cfi_def_cfa +#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register +#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset +#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset +#define CFI_OFFSET .cfi_offset +#define CFI_REL_OFFSET .cfi_rel_offset +#define CFI_REGISTER .cfi_register +#define CFI_RESTORE .cfi_restore +#define CFI_REMEMBER_STATE .cfi_remember_state +#define CFI_RESTORE_STATE .cfi_restore_state + +#else + +/* Due to the structure of pre-exisiting code, don't use assembler line + comment character # to ignore the arguments. Instead, use a dummy macro. 
*/ +.macro ignore a=0, b=0, c=0, d=0 +.endm + +#define CFI_STARTPROC ignore +#define CFI_ENDPROC ignore +#define CFI_DEF_CFA ignore +#define CFI_DEF_CFA_REGISTER ignore +#define CFI_DEF_CFA_OFFSET ignore +#define CFI_ADJUST_CFA_OFFSET ignore +#define CFI_OFFSET ignore +#define CFI_REL_OFFSET ignore +#define CFI_REGISTER ignore +#define CFI_RESTORE ignore +#define CFI_REMEMBER_STATE ignore +#define CFI_RESTORE_STATE ignore + +#endif + +#endif diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h index 4153d80e4d2..1eac92cb5b1 100644 --- a/include/asm-i386/elf.h +++ b/include/asm-i386/elf.h @@ -10,6 +10,7 @@ #include <asm/processor.h> #include <asm/system.h> /* for savesegment */ #include <asm/auxvec.h> +#include <asm/desc.h> #include <linux/utsname.h> @@ -129,15 +130,41 @@ extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) -#define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL)) -#define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE) -#define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall) +#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) +#define VDSO_BASE ((unsigned long)current->mm->context.vdso) + +#ifdef CONFIG_COMPAT_VDSO +# define VDSO_COMPAT_BASE VDSO_HIGH_BASE +# define VDSO_PRELINK VDSO_HIGH_BASE +#else +# define VDSO_COMPAT_BASE VDSO_BASE +# define VDSO_PRELINK 0 +#endif + +#define VDSO_COMPAT_SYM(x) \ + (VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK) + +#define VDSO_SYM(x) \ + (VDSO_BASE + (unsigned long)(x) - VDSO_PRELINK) + +#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) +#define VDSO_EHDR ((const struct elfhdr *) VDSO_COMPAT_BASE) + extern void __kernel_vsyscall; +#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int executable_stack); + +extern unsigned int vdso_enabled; + #define ARCH_DLINFO \ -do { \ - NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE); \ +do if (vdso_enabled) { \ + NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \ } while (0) /* @@ -148,15 +175,15 @@ do { \ * Dumping its extra ELF program headers includes all the other information * a debugger needs to easily find how the vsyscall DSO was being used. 
*/ -#define ELF_CORE_EXTRA_PHDRS (VSYSCALL_EHDR->e_phnum) +#define ELF_CORE_EXTRA_PHDRS (VDSO_HIGH_EHDR->e_phnum) #define ELF_CORE_WRITE_EXTRA_PHDRS \ do { \ const struct elf_phdr *const vsyscall_phdrs = \ - (const struct elf_phdr *) (VSYSCALL_BASE \ - + VSYSCALL_EHDR->e_phoff); \ + (const struct elf_phdr *) (VDSO_HIGH_BASE \ + + VDSO_HIGH_EHDR->e_phoff); \ int i; \ Elf32_Off ofs = 0; \ - for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ + for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \ struct elf_phdr phdr = vsyscall_phdrs[i]; \ if (phdr.p_type == PT_LOAD) { \ BUG_ON(ofs != 0); \ @@ -174,10 +201,10 @@ do { \ #define ELF_CORE_WRITE_EXTRA_DATA \ do { \ const struct elf_phdr *const vsyscall_phdrs = \ - (const struct elf_phdr *) (VSYSCALL_BASE \ - + VSYSCALL_EHDR->e_phoff); \ + (const struct elf_phdr *) (VDSO_HIGH_BASE \ + + VDSO_HIGH_EHDR->e_phoff); \ int i; \ - for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ + for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \ if (vsyscall_phdrs[i].p_type == PT_LOAD) \ DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \ PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h index f7e068f4d2f..a48cc3f7ccc 100644 --- a/include/asm-i386/fixmap.h +++ b/include/asm-i386/fixmap.h @@ -51,7 +51,7 @@ */ enum fixed_addresses { FIX_HOLE, - FIX_VSYSCALL, + FIX_VDSO, #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ #endif @@ -115,14 +115,6 @@ extern void __set_fixmap (enum fixed_addresses idx, #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) -/* - * This is the range that is readable by user mode, and things - * acting like user mode such as get_user_pages. 
- */ -#define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL)) -#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) - - extern void __this_fixmap_does_not_exist(void); /* diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h index 95d3fd09029..a4c0a5a9ffd 100644 --- a/include/asm-i386/hw_irq.h +++ b/include/asm-i386/hw_irq.h @@ -19,6 +19,8 @@ struct hw_interrupt_type; +#define NMI_VECTOR 0x02 + /* * Various low-level irq details needed by irq.c, process.c, * time.c, io_apic.c and smp.c diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h new file mode 100644 index 00000000000..134ea9cc528 --- /dev/null +++ b/include/asm-i386/intel_arch_perfmon.h @@ -0,0 +1,19 @@ +#ifndef X86_INTEL_ARCH_PERFMON_H +#define X86_INTEL_ARCH_PERFMON_H 1 + +#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 +#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 + +#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 +#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 + +#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) +#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) +#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) +#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) + +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0) + +#endif /* X86_INTEL_ARCH_PERFMON_H */ diff --git a/include/asm-i386/k8.h b/include/asm-i386/k8.h new file mode 100644 index 00000000000..dfd88a6e604 --- /dev/null +++ b/include/asm-i386/k8.h @@ -0,0 +1 @@ +#include <asm-x86_64/k8.h> diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h index 96d0828ce09..d18cdb9fc9a 100644 --- a/include/asm-i386/kdebug.h +++ b/include/asm-i386/kdebug.h @@ -19,6 +19,8 @@ struct die_args { extern int register_die_notifier(struct notifier_block *); extern int unregister_die_notifier(struct notifier_block *); +extern int register_page_fault_notifier(struct notifier_block *); +extern int unregister_page_fault_notifier(struct notifier_block *); extern struct atomic_notifier_head i386die_chain; diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h index 57d157c5cf8..0730a20f6db 100644 --- a/include/asm-i386/kprobes.h +++ b/include/asm-i386/kprobes.h @@ -44,6 +44,7 @@ typedef u8 kprobe_opcode_t; #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES +#define ARCH_INACTIVE_KPROBE_COUNT 0 void arch_remove_kprobe(struct kprobe *p); void kretprobe_trampoline(void); diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h index e67fa08260f..3b4998c51d0 100644 --- a/include/asm-i386/local.h +++ b/include/asm-i386/local.h @@ -55,12 +55,26 @@ static __inline__ void local_sub(long i, local_t *v) * much more efficient than these naive implementations. Note they take * a variable, not an address. */ -#define cpu_local_read(v) local_read(&__get_cpu_var(v)) -#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) -#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) -#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) -#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) -#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) + +/* Need to disable preemption for the cpu local counters otherwise we could + still access a variable of a previous CPU in a non atomic way. 
*/ +#define cpu_local_wrap_v(v) \ + ({ local_t res__; \ + preempt_disable(); \ + res__ = (v); \ + preempt_enable(); \ + res__; }) +#define cpu_local_wrap(v) \ + ({ preempt_disable(); \ + v; \ + preempt_enable(); }) \ + +#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v))) +#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i))) +#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v))) +#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v))) +#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v))) +#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v))) #define __cpu_local_inc(v) cpu_local_inc(v) #define __cpu_local_dec(v) cpu_local_dec(v) diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h index a1d0072e36b..0dba244c86d 100644 --- a/include/asm-i386/mach-default/mach_ipi.h +++ b/include/asm-i386/mach-default/mach_ipi.h @@ -1,6 +1,9 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H +/* Avoid include hell */ +#define NMI_VECTOR 0x02 + void send_IPI_mask_bitmask(cpumask_t mask, int vector); void __send_IPI_shortcut(unsigned int shortcut, int vector); @@ -13,7 +16,7 @@ static inline void send_IPI_mask(cpumask_t mask, int vector) static inline void __local_send_IPI_allbutself(int vector) { - if (no_broadcast) { + if (no_broadcast || vector == NMI_VECTOR) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); @@ -24,7 +27,7 @@ static inline void __local_send_IPI_allbutself(int vector) static inline void __local_send_IPI_all(int vector) { - if (no_broadcast) + if (no_broadcast || vector == NMI_VECTOR) send_IPI_mask(cpu_online_map, vector); else __send_IPI_shortcut(APIC_DEST_ALLINC, vector); diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h index 4b9703bb028..807992fd417 100644 --- a/include/asm-i386/mach-default/mach_timer.h +++ b/include/asm-i386/mach-default/mach_timer.h @@ -15,7 +15,9 @@ #ifndef _MACH_TIMER_H #define _MACH_TIMER_H -#define CALIBRATE_LATCH (5 * LATCH) +#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ +#define CALIBRATE_LATCH \ + ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) static inline void mach_prepare_counter(void) { diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h index 1cce2b924a8..94268399170 100644 --- a/include/asm-i386/mach-summit/mach_mpparse.h +++ b/include/asm-i386/mach-summit/mach_mpparse.h @@ -2,6 +2,7 @@ #define __ASM_MACH_MPPARSE_H #include <mach_apic.h> +#include <asm/tsc.h> extern int use_cyclone; @@ -29,6 +30,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, (!strncmp(productid, "VIGIL SMP", 9) || !strncmp(productid, "EXA", 3) || !strncmp(productid, "RUTHLESS SMP", 12))){ + mark_tsc_unstable(); use_cyclone = 1; /*enable cyclone-timer*/ setup_summit(); return 1; @@ -42,6 +44,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERVIGIL", 8) || !strncmp(oem_table_id, "EXA", 3))){ + mark_tsc_unstable(); use_cyclone = 1; /*enable cyclone-timer*/ setup_summit(); return 1; diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h index f431a0b86d4..8358dd3df7a 100644 --- a/include/asm-i386/mmu.h +++ b/include/asm-i386/mmu.h @@ -12,6 +12,7 @@ typedef struct { int size; struct semaphore sem; void *ldt; + void *vdso; } mm_context_t; #endif diff --git 
a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h index 21f16638fc6..67d99479999 100644 --- a/include/asm-i386/nmi.h +++ b/include/asm-i386/nmi.h @@ -5,24 +5,38 @@ #define ASM_NMI_H #include <linux/pm.h> - + struct pt_regs; - + typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); - -/** + +/** * set_nmi_callback * * Set a handler for an NMI. Only one handler may be * set. Return 1 if the NMI was handled. */ void set_nmi_callback(nmi_callback_t callback); - -/** + +/** * unset_nmi_callback * * Remove the handler previously set. */ void unset_nmi_callback(void); - + +extern void setup_apic_nmi_watchdog (void); +extern int reserve_lapic_nmi(void); +extern void release_lapic_nmi(void); +extern void disable_timer_nmi_watchdog(void); +extern void enable_timer_nmi_watchdog(void); +extern void nmi_watchdog_tick (struct pt_regs * regs); + +extern unsigned int nmi_watchdog; +#define NMI_DEFAULT -1 +#define NMI_NONE 0 +#define NMI_IO_APIC 1 +#define NMI_LOCAL_APIC 2 +#define NMI_INVALID 3 + #endif /* ASM_NMI_H */ diff --git a/include/asm-i386/node.h b/include/asm-i386/node.h deleted file mode 100644 index e13c6ffa72a..00000000000 --- a/include/asm-i386/node.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _ASM_I386_NODE_H_ -#define _ASM_I386_NODE_H_ - -#include <linux/device.h> -#include <linux/mmzone.h> -#include <linux/node.h> -#include <linux/topology.h> -#include <linux/nodemask.h> - -struct i386_node { - struct node node; -}; -extern struct i386_node node_devices[MAX_NUMNODES]; - -static inline int arch_register_node(int num){ - int p_node; - struct node *parent = NULL; - - if (!node_online(num)) - return 0; - p_node = parent_node(num); - - if (p_node != num) - parent = &node_devices[p_node].node; - - return register_node(&node_devices[num].node, num, parent); -} - -#endif /* _ASM_I386_NODE_H_ */ diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h index e3a552fa553..f5bf544c729 100644 --- a/include/asm-i386/page.h +++ b/include/asm-i386/page.h @@ -96,6 +96,8 @@ typedef struct { unsigned long pgprot; } pgprot_t; #ifndef __ASSEMBLY__ +struct vm_area_struct; + /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. @@ -139,6 +141,7 @@ extern int page_is_ram(unsigned long pagenr); #include <asm-generic/memory_model.h> #include <asm-generic/page.h> +#define __HAVE_ARCH_GATE_AREA 1 #endif /* __KERNEL__ */ #endif /* _I386_PAGE_H */ diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index 0c83cf12eec..b32346d62e1 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h @@ -71,8 +71,12 @@ struct cpuinfo_x86 { cpumask_t llc_shared_map; /* cpus sharing the last level cache */ #endif unsigned char x86_max_cores; /* cpuid returned max cores value */ - unsigned char booted_cores; /* number of cores as seen by OS */ unsigned char apicid; +#ifdef CONFIG_SMP + unsigned char booted_cores; /* number of cores as seen by OS */ + __u8 phys_proc_id; /* Physical processor id. 
*/ + __u8 cpu_core_id; /* Core id */ +#endif } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 @@ -104,14 +108,13 @@ extern struct cpuinfo_x86 cpu_data[]; #define current_cpu_data boot_cpu_data #endif -extern int phys_proc_id[NR_CPUS]; -extern int cpu_core_id[NR_CPUS]; extern int cpu_llc_id[NR_CPUS]; extern char ignore_fpu_irq; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); +extern unsigned short num_cache_leaves; #ifdef CONFIG_X86_HT extern void detect_ht(struct cpuinfo_x86 *c); @@ -554,7 +557,7 @@ extern void prepare_to_copy(struct task_struct *tsk); extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern unsigned long thread_saved_pc(struct task_struct *tsk); -void show_trace(struct task_struct *task, unsigned long *stack); +void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack); unsigned long get_wchan(struct task_struct *p); diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 0249f912a29..cab0180567f 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -427,7 +427,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l * does not enforce ordering, since there is no data dependency between * the read of "a" and the read of "b". Therefore, on some CPUs, such * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like thiswhere there are no data dependencies. + * in cases like this where there are no data dependencies. **/ #define read_barrier_depends() do { } while(0) diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h index 8420ed12491..2833fa2c0dd 100644 --- a/include/asm-i386/thread_info.h +++ b/include/asm-i386/thread_info.h @@ -37,6 +37,7 @@ struct thread_info { 0-0xBFFFFFFF for user-thead 0-0xFFFFFFFF for kernel-thread */ + void *sysenter_return; struct restart_block restart_block; unsigned long previous_esp; /* ESP of the previous stack in case @@ -83,17 +84,15 @@ struct thread_info { #define init_stack (init_thread_union.stack) +/* how to get the current stack pointer from C */ +register unsigned long current_stack_pointer asm("esp") __attribute_used__; + /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { - struct thread_info *ti; - __asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); - return ti; + return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1)); } -/* how to get the current stack pointer from C */ -register unsigned long current_stack_pointer asm("esp") __attribute_used__; - /* thread information allocation */ #ifdef CONFIG_DEBUG_STACK_USAGE #define alloc_thread_info(tsk) \ @@ -140,8 +139,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__; #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ -#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_MEMDIE 17 +#define TIF_MEMDIE 16 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) @@ -153,7 +151,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__; #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define 
_TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) -#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK \ @@ -170,6 +167,9 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__; * have to worry about atomic accesses. */ #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ +#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */ + +#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING) #endif /* __KERNEL__ */ diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h index aed16437479..d0ebd05f851 100644 --- a/include/asm-i386/timer.h +++ b/include/asm-i386/timer.h @@ -3,68 +3,11 @@ #include <linux/init.h> #include <linux/pm.h> -/** - * struct timer_ops - used to define a timer source - * - * @name: name of the timer. - * @init: Probes and initializes the timer. Takes clock= override - * string as an argument. Returns 0 on success, anything else - * on failure. - * @mark_offset: called by the timer interrupt. - * @get_offset: called by gettimeofday(). Returns the number of microseconds - * since the last timer interupt. - * @monotonic_clock: returns the number of nanoseconds since the init of the - * timer. - * @delay: delays this many clock cycles. - */ -struct timer_opts { - char* name; - void (*mark_offset)(void); - unsigned long (*get_offset)(void); - unsigned long long (*monotonic_clock)(void); - void (*delay)(unsigned long); - unsigned long (*read_timer)(void); - int (*suspend)(pm_message_t state); - int (*resume)(void); -}; - -struct init_timer_opts { - int (*init)(char *override); - struct timer_opts *opts; -}; - #define TICK_SIZE (tick_nsec / 1000) - -extern struct timer_opts* __init select_timer(void); -extern void clock_fallback(void); void setup_pit_timer(void); - /* Modifiers for buggy PIT handling */ - extern int pit_latch_buggy; - -extern struct timer_opts *cur_timer; extern int timer_ack; - -/* list of externed timers */ -extern struct timer_opts timer_none; -extern struct timer_opts timer_pit; -extern struct init_timer_opts timer_pit_init; -extern struct init_timer_opts timer_tsc_init; -#ifdef CONFIG_X86_CYCLONE_TIMER -extern struct init_timer_opts timer_cyclone_init; -#endif - -extern unsigned long calibrate_tsc(void); -extern unsigned long read_timer_tsc(void); -extern void init_cpu_khz(void); extern int recalibrate_cpu_khz(void); -#ifdef CONFIG_HPET_TIMER -extern struct init_timer_opts timer_hpet_init; -extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr); -#endif -#ifdef CONFIG_X86_PM_TIMER -extern struct init_timer_opts timer_pmtmr_init; -#endif #endif diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h index d434984303c..3666044409f 100644 --- a/include/asm-i386/timex.h +++ b/include/asm-i386/timex.h @@ -7,6 +7,7 @@ #define _ASMi386_TIMEX_H #include <asm/processor.h> +#include <asm/tsc.h> #ifdef CONFIG_X86_ELAN # define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */ @@ -15,39 +16,6 @@ #endif -/* - * Standard way to access the cycle counter on i586+ CPUs. - * Currently only used on SMP. - * - * If you really have a SMP machine with i486 chips or older, - * compile for that, and this will just always return zero. - * That's ok, it just means that the nicer scheduling heuristics - * won't work for you. - * - * We only use the low 32 bits, and we'd simply better make sure - * that we reschedule before that wraps. 
Scheduling at least every - * four billion cycles just basically sounds like a good idea, - * regardless of how fast the machine is. - */ -typedef unsigned long long cycles_t; - -static inline cycles_t get_cycles (void) -{ - unsigned long long ret=0; - -#ifndef CONFIG_X86_TSC - if (!cpu_has_tsc) - return 0; -#endif - -#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) - rdtscll(ret); -#endif - return ret; -} - -extern unsigned int cpu_khz; - extern int read_current_timer(unsigned long *timer_value); #define ARCH_HAS_READ_CURRENT_TIMER 1 diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index b94e5eeef91..6adbd9b1ae8 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h @@ -28,10 +28,8 @@ #define _ASM_I386_TOPOLOGY_H #ifdef CONFIG_X86_HT -#define topology_physical_package_id(cpu) \ - (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) -#define topology_core_id(cpu) \ - (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu]) +#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) +#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) #endif @@ -114,4 +112,9 @@ extern unsigned long node_remap_size[]; extern cpumask_t cpu_coregroup_map(int cpu); +#ifdef CONFIG_SMP +#define mc_capable() (boot_cpu_data.x86_max_cores > 1) +#define smt_capable() (smp_num_siblings > 1) +#endif + #endif /* _ASM_I386_TOPOLOGY_H */ diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h new file mode 100644 index 00000000000..97b828ce31e --- /dev/null +++ b/include/asm-i386/tsc.h @@ -0,0 +1,49 @@ +/* + * linux/include/asm-i386/tsc.h + * + * i386 TSC related functions + */ +#ifndef _ASM_i386_TSC_H +#define _ASM_i386_TSC_H + +#include <linux/config.h> +#include <asm/processor.h> + +/* + * Standard way to access the cycle counter on i586+ CPUs. + * Currently only used on SMP. + * + * If you really have a SMP machine with i486 chips or older, + * compile for that, and this will just always return zero. + * That's ok, it just means that the nicer scheduling heuristics + * won't work for you. + * + * We only use the low 32 bits, and we'd simply better make sure + * that we reschedule before that wraps. Scheduling at least every + * four billion cycles just basically sounds like a good idea, + * regardless of how fast the machine is. + */ +typedef unsigned long long cycles_t; + +extern unsigned int cpu_khz; +extern unsigned int tsc_khz; + +static inline cycles_t get_cycles(void) +{ + unsigned long long ret = 0; + +#ifndef CONFIG_X86_TSC + if (!cpu_has_tsc) + return 0; +#endif + +#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) + rdtscll(ret); +#endif + return ret; +} + +extern void tsc_init(void); +extern void mark_tsc_unstable(void); + +#endif diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h new file mode 100644 index 00000000000..69f0f1df672 --- /dev/null +++ b/include/asm-i386/unwind.h @@ -0,0 +1,98 @@ +#ifndef _ASM_I386_UNWIND_H +#define _ASM_I386_UNWIND_H + +/* + * Copyright (C) 2002-2006 Novell, Inc. + * Jan Beulich <jbeulich@novell.com> + * This code is released under version 2 of the GNU GPL. 
+ */ + +#ifdef CONFIG_STACK_UNWIND + +#include <linux/sched.h> +#include <asm/fixmap.h> +#include <asm/ptrace.h> +#include <asm/uaccess.h> + +struct unwind_frame_info +{ + struct pt_regs regs; + struct task_struct *task; +}; + +#define UNW_PC(frame) (frame)->regs.eip +#define UNW_SP(frame) (frame)->regs.esp +#ifdef CONFIG_FRAME_POINTER +#define UNW_FP(frame) (frame)->regs.ebp +#define FRAME_RETADDR_OFFSET 4 +#define FRAME_LINK_OFFSET 0 +#define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0) +#define STACK_TOP(tsk) ((tsk)->thread.esp0) +#endif +#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) + +#define UNW_REGISTER_INFO \ + PTREGS_INFO(eax), \ + PTREGS_INFO(ecx), \ + PTREGS_INFO(edx), \ + PTREGS_INFO(ebx), \ + PTREGS_INFO(esp), \ + PTREGS_INFO(ebp), \ + PTREGS_INFO(esi), \ + PTREGS_INFO(edi), \ + PTREGS_INFO(eip) + +static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, + /*const*/ struct pt_regs *regs) +{ + if (user_mode_vm(regs)) + info->regs = *regs; + else { + memcpy(&info->regs, regs, offsetof(struct pt_regs, esp)); + info->regs.esp = (unsigned long)®s->esp; + info->regs.xss = __KERNEL_DS; + } +} + +static inline void arch_unw_init_blocked(struct unwind_frame_info *info) +{ + memset(&info->regs, 0, sizeof(info->regs)); + info->regs.eip = info->task->thread.eip; + info->regs.xcs = __KERNEL_CS; + __get_user(info->regs.ebp, (long *)info->task->thread.esp); + info->regs.esp = info->task->thread.esp; + info->regs.xss = __KERNEL_DS; + info->regs.xds = __USER_DS; + info->regs.xes = __USER_DS; +} + +extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, + asmlinkage int (*callback)(struct unwind_frame_info *, + void *arg), + void *arg); + +static inline int arch_unw_user_mode(const struct unwind_frame_info *info) +{ +#if 0 /* This can only work when selector register and EFLAGS saves/restores + are properly annotated (and tracked in UNW_REGISTER_INFO). 
*/ + return user_mode_vm(&info->regs); +#else + return info->regs.eip < PAGE_OFFSET + || (info->regs.eip >= __fix_to_virt(FIX_VDSO) + && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) + || info->regs.esp < PAGE_OFFSET; +#endif +} + +#else + +#define UNW_PC(frame) ((void)(frame), 0) + +static inline int arch_unw_user_mode(const void *info) +{ + return 0; +} + +#endif + +#endif /* _ASM_I386_UNWIND_H */ diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h index c195a9ad125..aed7142f9e4 100644 --- a/include/asm-ia64/kdebug.h +++ b/include/asm-ia64/kdebug.h @@ -40,6 +40,8 @@ struct die_args { extern int register_die_notifier(struct notifier_block *); extern int unregister_die_notifier(struct notifier_block *); +extern int register_page_fault_notifier(struct notifier_block *); +extern int unregister_page_fault_notifier(struct notifier_block *); extern struct atomic_notifier_head ia64die_chain; enum die_val { diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h index 8c0fc227f0f..2418a787c40 100644 --- a/include/asm-ia64/kprobes.h +++ b/include/asm-ia64/kprobes.h @@ -82,6 +82,7 @@ struct kprobe_ctlblk { #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES +#define ARCH_INACTIVE_KPROBE_COUNT 1 #define SLOT0_OPCODE_SHIFT (37) #define SLOT1_p1_OPCODE_SHIFT (37 - (64-46)) diff --git a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h index a140310bf84..2fb337b0e9b 100644 --- a/include/asm-ia64/nodedata.h +++ b/include/asm-ia64/nodedata.h @@ -46,6 +46,18 @@ struct ia64_node_data { */ #define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid]) +/* + * LOCAL_DATA_ADDR - This is to calculate the address of other node's + * "local_node_data" at hot-plug phase. The local_node_data + * is pointed by per_cpu_page. Kernel usually use it for + * just executing cpu. However, when new node is hot-added, + * the addresses of local data for other nodes are necessary + * to update all of them. 
+ */ +#define LOCAL_DATA_ADDR(pgdat) \ + ((struct ia64_node_data *)((u64)(pgdat) + \ + L1_CACHE_ALIGN(sizeof(struct pglist_data)))) + #endif /* CONFIG_NUMA */ #endif /* _ASM_IA64_NODEDATA_H */ diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index e5392c4d30c..8bc9869e576 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -27,6 +27,7 @@ struct thread_info { __u32 flags; /* thread_info flags (see TIF_*) */ __u32 cpu; /* current CPU */ __u32 last_cpu; /* Last CPU thread ran on */ + __u32 status; /* Thread synchronous flags */ mm_segment_t addr_limit; /* user-level address space limit */ int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ struct restart_block restart_block; @@ -103,4 +104,8 @@ struct thread_info { /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) +#define TS_POLLING 1 /* true if in idle loop and not sleeping */ + +#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING) + #endif /* _ASM_IA64_THREAD_INFO_H */ diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h index 616b5ed2aa7..937c2125752 100644 --- a/include/asm-ia64/topology.h +++ b/include/asm-ia64/topology.h @@ -112,6 +112,7 @@ void build_cpu_to_node_map(void); #define topology_core_id(cpu) (cpu_data(cpu)->core_id) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) +#define smt_capable() (smp_num_siblings > 1) #endif #include <asm-generic/topology.h> diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 33567e8bfe6..66c4742f09e 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -318,7 +318,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) * does not enforce ordering, since there is no data dependency between * the read of "a" and the read of "b". Therefore, on some CPUs, such * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like thiswhere there are no data dependencies. + * in cases like this where there are no data dependencies. **/ #define read_barrier_depends() do { } while (0) diff --git a/include/asm-m68knommu/page_offset.h b/include/asm-m68knommu/page_offset.h index 8ed6d7b7d9d..d4e73e0ba64 100644 --- a/include/asm-m68knommu/page_offset.h +++ b/include/asm-m68knommu/page_offset.h @@ -1,46 +1,5 @@ /* This handles the memory map.. 
*/ - -#ifdef CONFIG_COLDFIRE -#if defined(CONFIG_SMALL) -#define PAGE_OFFSET_RAW 0x30020000 -#elif defined(CONFIG_CFV240) -#define PAGE_OFFSET_RAW 0x02000000 -#else -#define PAGE_OFFSET_RAW 0x00000000 -#endif -#endif - -#ifdef CONFIG_M68360 -#define PAGE_OFFSET_RAW 0x00000000 -#endif - -#ifdef CONFIG_PILOT -#ifdef CONFIG_M68328 -#define PAGE_OFFSET_RAW 0x10000000 -#endif -#ifdef CONFIG_M68EZ328 -#define PAGE_OFFSET_RAW 0x00000000 -#endif -#endif -#ifdef CONFIG_UCSIMM -#define PAGE_OFFSET_RAW 0x00000000 -#endif - -#if defined(CONFIG_UCDIMM) || defined(CONFIG_DRAGEN2) -#ifdef CONFIG_M68VZ328 -#define PAGE_OFFSET_RAW 0x00000000 -#endif /* CONFIG_M68VZ328 */ -#endif /* CONFIG_UCDIMM */ - -#ifdef CONFIG_M68EZ328ADS -#define PAGE_OFFSET_RAW 0x00000000 -#endif -#ifdef CONFIG_ALMA_ANS -#define PAGE_OFFSET_RAW 0x00000000 -#endif -#ifdef CONFIG_M68EN302 -#define PAGE_OFFSET_RAW 0x00000000 -#endif +#define PAGE_OFFSET_RAW CONFIG_RAMBASE diff --git a/include/asm-m68knommu/ptrace.h b/include/asm-m68knommu/ptrace.h index 1e19c457de7..47258e86e8c 100644 --- a/include/asm-m68knommu/ptrace.h +++ b/include/asm-m68knommu/ptrace.h @@ -46,11 +46,9 @@ struct pt_regs { #else unsigned short sr; unsigned long pc; -#ifndef NO_FORMAT_VEC unsigned format : 4; /* frame format specifier */ unsigned vector : 12; /* vector offset */ #endif -#endif }; /* diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h index c01786ab5fa..532bfee934f 100644 --- a/include/asm-powerpc/kdebug.h +++ b/include/asm-powerpc/kdebug.h @@ -18,6 +18,8 @@ struct die_args { extern int register_die_notifier(struct notifier_block *); extern int unregister_die_notifier(struct notifier_block *); +extern int register_page_fault_notifier(struct notifier_block *); +extern int unregister_page_fault_notifier(struct notifier_block *); extern struct atomic_notifier_head powerpc_die_chain; /* Grossly misnamed. 
*/ diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h index f466bc804f4..2d0af52c823 100644 --- a/include/asm-powerpc/kprobes.h +++ b/include/asm-powerpc/kprobes.h @@ -50,6 +50,8 @@ typedef unsigned int kprobe_opcode_t; IS_TWI(instr) || IS_TDI(instr)) #define ARCH_SUPPORTS_KRETPROBES +#define ARCH_INACTIVE_KPROBE_COUNT 1 + void kretprobe_trampoline(void); extern void arch_remove_kprobe(struct kprobe *p); diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h index 92f3e5507d2..bbc3844b086 100644 --- a/include/asm-powerpc/topology.h +++ b/include/asm-powerpc/topology.h @@ -93,5 +93,10 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev, #endif /* CONFIG_NUMA */ +#ifdef CONFIG_SMP +#include <asm/cputable.h> +#define smt_capable() (cpu_has_feature(CPU_FTR_SMT)) +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_TOPOLOGY_H */ diff --git a/include/asm-sparc/io.h b/include/asm-sparc/io.h index a42df208d59..cab0b851b8b 100644 --- a/include/asm-sparc/io.h +++ b/include/asm-sparc/io.h @@ -249,6 +249,22 @@ extern void __iomem *ioremap(unsigned long offset, unsigned long size); #define ioremap_nocache(X,Y) ioremap((X),(Y)) extern void iounmap(volatile void __iomem *addr); +#define ioread8(X) readb(X) +#define ioread16(X) readw(X) +#define ioread32(X) readl(X) +#define iowrite8(val,X) writeb(val,X) +#define iowrite16(val,X) writew(val,X) +#define iowrite32(val,X) writel(val,X) + +/* Create a virtual mapping cookie for an IO port range */ +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *); + +/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ +struct pci_dev; +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); +extern void pci_iounmap(struct pci_dev *dev, void __iomem *); + /* * Bus number may be in res->flags... somewhere. 
*/ diff --git a/include/asm-sparc/prom.h b/include/asm-sparc/prom.h index c5e3d26eabd..f9cf44c0716 100644 --- a/include/asm-sparc/prom.h +++ b/include/asm-sparc/prom.h @@ -35,6 +35,8 @@ struct property { int length; void *value; struct property *next; + unsigned long _flags; + unsigned int unique_id; }; struct device_node { @@ -58,8 +60,15 @@ struct device_node { struct kref kref; unsigned long _flags; void *data; + unsigned int unique_id; }; +/* flag descriptions */ +#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ + +#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) +#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) + static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) { dn->pde = de; @@ -88,6 +97,7 @@ extern struct property *of_find_property(struct device_node *np, extern int of_device_is_compatible(struct device_node *device, const char *); extern void *of_get_property(struct device_node *node, const char *name, int *lenp); +extern int of_set_property(struct device_node *node, const char *name, void *val, int len); extern int of_getintprop_default(struct device_node *np, const char *name, int def); diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h index 3c2b5bc8650..0f5b89c9323 100644 --- a/include/asm-sparc64/dma-mapping.h +++ b/include/asm-sparc64/dma-mapping.h @@ -162,4 +162,47 @@ static inline void dma_free_coherent(struct device *dev, size_t size, #endif /* PCI */ + +/* Now for the API extensions over the pci_ one */ + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +#define dma_is_consistent(d) (1) + +static inline int +dma_get_cache_alignment(void) +{ + /* no easy way to get cache size on all processors, so return + * the maximum possible, to be safe */ + return (1 << INTERNODE_CACHE_SHIFT); +} + +static inline void +dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + /* just sync everything, that's all the pci API can do */ + dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction); +} + +static inline void +dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + /* just sync everything, that's all the pci API can do */ + dma_sync_single_for_device(dev, dma_handle, offset+size, direction); +} + +static inline void +dma_cache_sync(void *vaddr, size_t size, + enum dma_data_direction direction) +{ + /* could define this in terms of the dma_cache ... 
operations, + * but if you get this on a platform, you should convert the platform + * to using the generic device DMA API */ + BUG(); +} + #endif /* _ASM_SPARC64_DMA_MAPPING_H */ diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h index f8d57bb5570..b591d0e8d8f 100644 --- a/include/asm-sparc64/floppy.h +++ b/include/asm-sparc64/floppy.h @@ -208,7 +208,55 @@ static void sun_fd_enable_dma(void) pdma_areasize = pdma_size; } -extern irqreturn_t sparc_floppy_irq(int, void *, struct pt_regs *); +irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs) +{ + if (likely(doing_pdma)) { + void __iomem *stat = (void __iomem *) fdc_status; + unsigned char *vaddr = pdma_vaddr; + unsigned long size = pdma_size; + u8 val; + + while (size) { + val = readb(stat); + if (unlikely(!(val & 0x80))) { + pdma_vaddr = vaddr; + pdma_size = size; + return IRQ_HANDLED; + } + if (unlikely(!(val & 0x20))) { + pdma_vaddr = vaddr; + pdma_size = size; + doing_pdma = 0; + goto main_interrupt; + } + if (val & 0x40) { + /* read */ + *vaddr++ = readb(stat + 1); + } else { + unsigned char data = *vaddr++; + + /* write */ + writeb(data, stat + 1); + } + size--; + } + + pdma_vaddr = vaddr; + pdma_size = size; + + /* Send Terminal Count pulse to floppy controller. */ + val = readb(auxio_register); + val |= AUXIO_AUX1_FTCNT; + writeb(val, auxio_register); + val &= ~AUXIO_AUX1_FTCNT; + writeb(val, auxio_register); + + doing_pdma = 0; + } + +main_interrupt: + return floppy_interrupt(irq, dev_cookie, regs); +} static int sun_fd_request_irq(void) { diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h index 4040d127ac3..11251bdd00c 100644 --- a/include/asm-sparc64/kdebug.h +++ b/include/asm-sparc64/kdebug.h @@ -17,6 +17,8 @@ struct die_args { extern int register_die_notifier(struct notifier_block *); extern int unregister_die_notifier(struct notifier_block *); +extern int register_page_fault_notifier(struct notifier_block *); +extern int unregister_page_fault_notifier(struct notifier_block *); extern struct atomic_notifier_head sparc64die_chain; extern void bad_trap(struct pt_regs *, long); diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h index e9bb26f770e..15065af566c 100644 --- a/include/asm-sparc64/kprobes.h +++ b/include/asm-sparc64/kprobes.h @@ -12,6 +12,7 @@ typedef u32 kprobe_opcode_t; #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define arch_remove_kprobe(p) do {} while (0) +#define ARCH_INACTIVE_KPROBE_COUNT 0 /* Architecture specific copy of original instruction*/ struct arch_specific_insn { diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h index 6d1556c0c26..265614d497c 100644 --- a/include/asm-sparc64/prom.h +++ b/include/asm-sparc64/prom.h @@ -35,6 +35,8 @@ struct property { int length; void *value; struct property *next; + unsigned long _flags; + unsigned int unique_id; }; struct device_node { @@ -58,8 +60,15 @@ struct device_node { struct kref kref; unsigned long _flags; void *data; + unsigned int unique_id; }; +/* flag descriptions */ +#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ + +#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) +#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) + static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) { dn->pde = de; @@ -88,6 +97,7 @@ extern struct property *of_find_property(struct device_node *np, extern int of_device_is_compatible(struct device_node *device, const char *); extern void 
*of_get_property(struct device_node *node, const char *name, int *lenp); +extern int of_set_property(struct device_node *node, const char *name, void *val, int len); extern int of_getintprop_default(struct device_node *np, const char *name, int def); diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h index 0e234e201bd..98a6c613589 100644 --- a/include/asm-sparc64/topology.h +++ b/include/asm-sparc64/topology.h @@ -1,6 +1,9 @@ #ifndef _ASM_SPARC64_TOPOLOGY_H #define _ASM_SPARC64_TOPOLOGY_H +#include <asm/spitfire.h> +#define smt_capable() (tlb_type == hypervisor) + #include <asm-generic/topology.h> #endif /* _ASM_SPARC64_TOPOLOGY_H */ diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h new file mode 100644 index 00000000000..387c8f66af7 --- /dev/null +++ b/include/asm-x86_64/alternative.h @@ -0,0 +1,146 @@ +#ifndef _X86_64_ALTERNATIVE_H +#define _X86_64_ALTERNATIVE_H + +#ifdef __KERNEL__ + +#include <linux/types.h> + +struct alt_instr { + u8 *instr; /* original instruction */ + u8 *replacement; + u8 cpuid; /* cpuid bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction, <= instrlen */ + u8 pad[5]; +}; + +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +struct module; +extern void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end); +extern void alternatives_smp_module_del(struct module *mod); +extern void alternatives_smp_switch(int smp); + +#endif + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. + */ +#define alternative(oldinstr, newinstr, feature) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature) : "memory") + +/* + * Alternative inline assembly with input. + * + * Pecularities: + * No memory clobber here. + * Argument numbers start with 1. + * Best is to use constraints that are fixed size (like (%1) ... "r") + * If you use variable sized constraints like "m" or "g" in the + * replacement make sure to pad to the worst case length. + */ +#define alternative_input(oldinstr, newinstr, feature, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature), ##input) + +/* Like alternative_input, but with a single output argument */ +#define alternative_io(oldinstr, newinstr, feature, output, input...) 
\ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte %c[feat]\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" : output : [feat] "i" (feature), ##input) + +/* + * Alternative inline assembly for SMP. + * + * alternative_smp() takes two versions (SMP first, UP second) and is + * for more complex stuff such as spinlocks. + * + * The LOCK_PREFIX macro defined here replaces the LOCK and + * LOCK_PREFIX macros used everywhere in the source tree. + * + * SMP alternatives use the same data structures as the other + * alternatives and the X86_FEATURE_UP flag to indicate the case of a + * UP system running a SMP kernel. The existing apply_alternatives() + * works fine for patching a SMP kernel for UP. + * + * The SMP alternative tables can be kept after boot and contain both + * UP and SMP versions of the instructions to allow switching back to + * SMP at runtime, when hotplugging in a new CPU, which is especially + * useful in virtualized environments. + * + * The very common lock prefix is handled as special case in a + * separate table which is a pure address list without replacement ptr + * and size information. That keeps the table sizes small. + */ + +#ifdef CONFIG_SMP +#define alternative_smp(smpinstr, upinstr, args...) \ + asm volatile ("661:\n\t" smpinstr "\n662:\n" \ + ".section .smp_altinstructions,\"a\"\n" \ + " .align 8\n" \ + " .quad 661b\n" /* label */ \ + " .quad 663f\n" /* new instruction */ \ + " .byte 0x66\n" /* X86_FEATURE_UP */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .smp_altinstr_replacement,\"awx\"\n" \ + "663:\n\t" upinstr "\n" /* replacement */ \ + "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \ + ".previous" : args) + +#define LOCK_PREFIX \ + ".section .smp_locks,\"a\"\n" \ + " .align 8\n" \ + " .quad 661f\n" /* address */ \ + ".previous\n" \ + "661:\n\tlock; " + +#else /* ! CONFIG_SMP */ +#define alternative_smp(smpinstr, upinstr, args...) 
\ + asm volatile (upinstr : args) +#define LOCK_PREFIX "" +#endif + +#endif /* _X86_64_ALTERNATIVE_H */ diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index a731be2204d..9c96a0a8d1b 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h @@ -49,7 +49,8 @@ static __inline unsigned int apic_read(unsigned long reg) static __inline__ void apic_wait_icr_idle(void) { - while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ); + while (apic_read( APIC_ICR ) & APIC_ICR_BUSY) + cpu_relax(); } static inline void ack_APIC_irq(void) @@ -79,30 +80,23 @@ extern void init_apic_mappings (void); extern void smp_local_timer_interrupt (struct pt_regs * regs); extern void setup_boot_APIC_clock (void); extern void setup_secondary_APIC_clock (void); -extern void setup_apic_nmi_watchdog (void); -extern int reserve_lapic_nmi(void); -extern void release_lapic_nmi(void); -extern void disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); extern int APIC_init_uniprocessor (void); extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void clustered_apic_check(void); -extern void nmi_watchdog_default(void); -extern int setup_nmi_watchdog(char *); +extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector, + unsigned char msg_type, unsigned char mask); -extern unsigned int nmi_watchdog; -#define NMI_DEFAULT -1 -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 -#define NMI_INVALID 3 +#define K8_APIC_EXT_LVT_BASE 0x500 +#define K8_APIC_EXT_INT_MSG_FIX 0x0 +#define K8_APIC_EXT_INT_MSG_SMI 0x2 +#define K8_APIC_EXT_INT_MSG_NMI 0x4 +#define K8_APIC_EXT_INT_MSG_EXT 0x7 +#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0 extern int disable_timer_pin_1; -extern void setup_threshold_lvt(unsigned long lvt_off); void smp_send_timer_broadcast_ipi(void); void switch_APIC_timer_to_ipi(void *cpumask); diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index bd3fa67ed83..007e88d6d43 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h @@ -1,7 +1,7 @@ #ifndef __ARCH_X86_64_ATOMIC__ #define __ARCH_X86_64_ATOMIC__ -#include <asm/types.h> +#include <asm/alternative.h> /* atomic_t should be 32 bit signed type */ @@ -52,7 +52,7 @@ typedef struct { volatile int counter; } atomic_t; static __inline__ void atomic_add(int i, atomic_t *v) { __asm__ __volatile__( - LOCK "addl %1,%0" + LOCK_PREFIX "addl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -67,7 +67,7 @@ static __inline__ void atomic_add(int i, atomic_t *v) static __inline__ void atomic_sub(int i, atomic_t *v) { __asm__ __volatile__( - LOCK "subl %1,%0" + LOCK_PREFIX "subl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -86,7 +86,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "subl %2,%0; sete %1" + LOCK_PREFIX "subl %2,%0; sete %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -101,7 +101,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) static __inline__ void atomic_inc(atomic_t *v) { __asm__ __volatile__( - LOCK "incl %0" + LOCK_PREFIX "incl %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -115,7 +115,7 @@ static __inline__ void atomic_inc(atomic_t *v) static __inline__ void atomic_dec(atomic_t *v) { __asm__ __volatile__( - LOCK "decl %0" + LOCK_PREFIX "decl %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -133,7 +133,7 
@@ static __inline__ int atomic_dec_and_test(atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "decl %0; sete %1" + LOCK_PREFIX "decl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -152,7 +152,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "incl %0; sete %1" + LOCK_PREFIX "incl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -172,7 +172,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "addl %2,%0; sets %1" + LOCK_PREFIX "addl %2,%0; sets %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -189,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) { int __i = i; __asm__ __volatile__( - LOCK "xaddl %0, %1;" + LOCK_PREFIX "xaddl %0, %1;" :"=r"(i) :"m"(v->counter), "0"(i)); return i + __i; @@ -237,7 +237,7 @@ typedef struct { volatile long counter; } atomic64_t; static __inline__ void atomic64_add(long i, atomic64_t *v) { __asm__ __volatile__( - LOCK "addq %1,%0" + LOCK_PREFIX "addq %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -252,7 +252,7 @@ static __inline__ void atomic64_add(long i, atomic64_t *v) static __inline__ void atomic64_sub(long i, atomic64_t *v) { __asm__ __volatile__( - LOCK "subq %1,%0" + LOCK_PREFIX "subq %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -271,7 +271,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) unsigned char c; __asm__ __volatile__( - LOCK "subq %2,%0; sete %1" + LOCK_PREFIX "subq %2,%0; sete %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -286,7 +286,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) static __inline__ void atomic64_inc(atomic64_t *v) { __asm__ __volatile__( - LOCK "incq %0" + LOCK_PREFIX "incq %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -300,7 +300,7 @@ static __inline__ void atomic64_inc(atomic64_t *v) static __inline__ void atomic64_dec(atomic64_t *v) { __asm__ __volatile__( - LOCK "decq %0" + LOCK_PREFIX "decq %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -318,7 +318,7 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v) unsigned char c; __asm__ __volatile__( - LOCK "decq %0; sete %1" + LOCK_PREFIX "decq %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -337,7 +337,7 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v) unsigned char c; __asm__ __volatile__( - LOCK "incq %0; sete %1" + LOCK_PREFIX "incq %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -357,7 +357,7 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v) unsigned char c; __asm__ __volatile__( - LOCK "addq %2,%0; sets %1" + LOCK_PREFIX "addq %2,%0; sets %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -374,7 +374,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v) { long __i = i; __asm__ __volatile__( - LOCK "xaddq %0, %1;" + LOCK_PREFIX "xaddq %0, %1;" :"=r"(i) :"m"(v->counter), "0"(i)); return i + __i; @@ -418,11 +418,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) /* These are x86-specific, used by some header files */ #define atomic_clear_mask(mask, addr) \ -__asm__ __volatile__(LOCK "andl %0,%1" \ +__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ : : "r" (~(mask)),"m" (*addr) : "memory") 
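[Editorial aside, not part of the patch.] The alternatives comment above notes that LOCK_PREFIX only records the address of each lock-prefixed instruction in the .smp_locks section, so a kernel that finds itself booting on a uniprocessor can later patch the lock byte out of these atomic operations. A minimal sketch of such a patching pass follows; the symbols smp_locks_start/smp_locks_end and the function name are hypothetical stand-ins assumed to bound the pointer list described above, not the kernel's real alternatives code.

#include <linux/types.h>

/* Hypothetical linker-provided bounds of the .smp_locks address list. */
extern u8 *smp_locks_start[], *smp_locks_end[];

/*
 * Sketch: on a UP boot, walk every address recorded by LOCK_PREFIX and
 * overwrite the LOCK prefix byte with a NOP, so "lock; incl" becomes
 * "nop; incl".  Assumes kernel text has already been made writable.
 */
static void sketch_smp_unlock(void)
{
	u8 **entry;

	for (entry = smp_locks_start; entry < smp_locks_end; entry++) {
		u8 *insn = *entry;	/* 661: label recorded by LOCK_PREFIX */

		if (*insn == 0xf0)	/* x86 LOCK prefix byte */
			*insn = 0x90;	/* single-byte NOP */
	}
}

Because this table stores bare addresses rather than full alt_instr records, it stays small even though the lock prefix is extremely common, which is the trade-off the comment above describes.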
#define atomic_set_mask(mask, addr) \ -__asm__ __volatile__(LOCK "orl %0,%1" \ +__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ : : "r" ((unsigned)mask),"m" (*(addr)) : "memory") /* Atomic operations are already serializing on x86 */ diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h index e9bf933d25d..f7ba57b1cc0 100644 --- a/include/asm-x86_64/bitops.h +++ b/include/asm-x86_64/bitops.h @@ -5,12 +5,7 @@ * Copyright 1992, Linus Torvalds. */ - -#ifdef CONFIG_SMP -#define LOCK_PREFIX "lock ; " -#else -#define LOCK_PREFIX "" -#endif +#include <asm/alternative.h> #define ADDR (*(volatile long *) addr) diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h new file mode 100644 index 00000000000..6e1654f3098 --- /dev/null +++ b/include/asm-x86_64/calgary.h @@ -0,0 +1,66 @@ +/* + * Derived from include/asm-powerpc/iommu.h + * + * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation + * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_X86_64_CALGARY_H +#define _ASM_X86_64_CALGARY_H + +#include <linux/config.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <asm/types.h> + +struct iommu_table { + unsigned long it_base; /* mapped address of tce table */ + unsigned long it_hint; /* Hint for next alloc */ + unsigned long *it_map; /* A simple allocation bitmap for now */ + spinlock_t it_lock; /* Protects it_map */ + unsigned int it_size; /* Size of iommu table in entries */ + unsigned char it_busno; /* Bus number this table belongs to */ + void __iomem *bbar; + u64 tar_val; + struct timer_list watchdog_timer; +}; + +#define TCE_TABLE_SIZE_UNSPECIFIED ~0 +#define TCE_TABLE_SIZE_64K 0 +#define TCE_TABLE_SIZE_128K 1 +#define TCE_TABLE_SIZE_256K 2 +#define TCE_TABLE_SIZE_512K 3 +#define TCE_TABLE_SIZE_1M 4 +#define TCE_TABLE_SIZE_2M 5 +#define TCE_TABLE_SIZE_4M 6 +#define TCE_TABLE_SIZE_8M 7 + +#ifdef CONFIG_CALGARY_IOMMU +extern int calgary_iommu_init(void); +extern void detect_calgary(void); +#else +static inline int calgary_iommu_init(void) { return 1; } +static inline void detect_calgary(void) { return; } +#endif + +static inline unsigned int bus_to_phb(unsigned char busno) +{ + return ((busno % 15 == 0) ? 
0 : busno / 2 + 1); +} + +#endif /* _ASM_X86_64_CALGARY_H */ diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h index 662964b74e3..ee792faaca0 100644 --- a/include/asm-x86_64/cpufeature.h +++ b/include/asm-x86_64/cpufeature.h @@ -46,6 +46,7 @@ #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ #define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */ +#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ @@ -65,6 +66,8 @@ #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */ #define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */ #define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ +#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */ +#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h index 498f66df36b..b6da83dcc7a 100644 --- a/include/asm-x86_64/dma-mapping.h +++ b/include/asm-x86_64/dma-mapping.h @@ -55,6 +55,13 @@ extern dma_addr_t bad_dma_address; extern struct dma_mapping_ops* dma_ops; extern int iommu_merge; +static inline int valid_dma_direction(int dma_direction) +{ + return ((dma_direction == DMA_BIDIRECTIONAL) || + (dma_direction == DMA_TO_DEVICE) || + (dma_direction == DMA_FROM_DEVICE)); +} + static inline int dma_mapping_error(dma_addr_t dma_addr) { if (dma_ops->mapping_error) @@ -72,6 +79,7 @@ static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); return dma_ops->map_single(hwdev, ptr, size, direction); } @@ -79,6 +87,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); dma_ops->unmap_single(dev, addr, size, direction); } @@ -91,6 +100,7 @@ static inline void dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_for_cpu) dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); @@ -101,6 +111,7 @@ static inline void dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_for_device) dma_ops->sync_single_for_device(hwdev, dma_handle, size, direction); @@ -111,6 +122,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_range_for_cpu) { dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction); } @@ -122,6 +134,7 @@ static inline void dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_range_for_device) dma_ops->sync_single_range_for_device(hwdev, dma_handle, offset, size, direction); @@ -133,6 +146,7 @@ static inline void dma_sync_sg_for_cpu(struct device 
*hwdev, struct scatterlist *sg, int nelems, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_sg_for_cpu) dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); flush_write_buffers(); @@ -142,6 +156,7 @@ static inline void dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { + BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_sg_for_device) { dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); } @@ -152,6 +167,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, static inline int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { + BUG_ON(!valid_dma_direction(direction)); return dma_ops->map_sg(hwdev, sg, nents, direction); } @@ -159,6 +175,7 @@ static inline void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { + BUG_ON(!valid_dma_direction(direction)); dma_ops->unmap_sg(hwdev, sg, nents, direction); } diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h index c556208d3dd..a37c16f0628 100644 --- a/include/asm-x86_64/dma.h +++ b/include/asm-x86_64/dma.h @@ -1,4 +1,4 @@ -/* $Id: dma.h,v 1.1.1.1 2001/04/19 20:00:38 ak Exp $ +/* * linux/include/asm/dma.h: Defines for using and allocating dma channels. * Written by Hennus Bergman, 1992. * High DMA channel support & info by Hannu Savolainen diff --git a/include/asm-x86_64/gart-mapping.h b/include/asm-x86_64/gart-mapping.h deleted file mode 100644 index ada497b0b55..00000000000 --- a/include/asm-x86_64/gart-mapping.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _X8664_GART_MAPPING_H -#define _X8664_GART_MAPPING_H 1 - -#include <linux/types.h> -#include <asm/types.h> - -struct device; - -extern void* -gart_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp); - -extern int -gart_dma_supported(struct device *hwdev, u64 mask); - -#endif /* _X8664_GART_MAPPING_H */ diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h index 18ff7ee9e77..b39098408b6 100644 --- a/include/asm-x86_64/hpet.h +++ b/include/asm-x86_64/hpet.h @@ -55,7 +55,7 @@ extern int is_hpet_enabled(void); extern int hpet_rtc_timer_init(void); -extern int oem_force_hpet_timer(void); +extern int apic_is_clustered_box(void); extern int hpet_use_timer; diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h index 3de96fd86a7..93187746278 100644 --- a/include/asm-x86_64/hw_irq.h +++ b/include/asm-x86_64/hw_irq.h @@ -12,8 +12,6 @@ * <tomsoft@informatik.tu-chemnitz.de> * * hacked by Andi Kleen for x86-64. - * - * $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $ */ #ifndef __ASSEMBLY__ @@ -126,7 +124,7 @@ asmlinkage void IRQ_NAME(nr); \ __asm__( \ "\n.p2align\n" \ "IRQ" #nr "_interrupt:\n\t" \ - "push $" #nr "-256 ; " \ + "push $~(" #nr ") ; " \ "jmp common_interrupt"); #if defined(CONFIG_X86_IO_APIC) diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h index b4f4b172b15..5b52ce50733 100644 --- a/include/asm-x86_64/ia32_unistd.h +++ b/include/asm-x86_64/ia32_unistd.h @@ -4,317 +4,15 @@ /* * This file contains the system call numbers of the ia32 port, * this is for the kernel only. + * Only add syscalls here where some part of the kernel needs to know + * the number. This should be otherwise in sync with asm-i386/unistd.h. 
-AK */ #define __NR_ia32_restart_syscall 0 #define __NR_ia32_exit 1 -#define __NR_ia32_fork 2 #define __NR_ia32_read 3 #define __NR_ia32_write 4 -#define __NR_ia32_open 5 -#define __NR_ia32_close 6 -#define __NR_ia32_waitpid 7 -#define __NR_ia32_creat 8 -#define __NR_ia32_link 9 -#define __NR_ia32_unlink 10 -#define __NR_ia32_execve 11 -#define __NR_ia32_chdir 12 -#define __NR_ia32_time 13 -#define __NR_ia32_mknod 14 -#define __NR_ia32_chmod 15 -#define __NR_ia32_lchown 16 -#define __NR_ia32_break 17 -#define __NR_ia32_oldstat 18 -#define __NR_ia32_lseek 19 -#define __NR_ia32_getpid 20 -#define __NR_ia32_mount 21 -#define __NR_ia32_umount 22 -#define __NR_ia32_setuid 23 -#define __NR_ia32_getuid 24 -#define __NR_ia32_stime 25 -#define __NR_ia32_ptrace 26 -#define __NR_ia32_alarm 27 -#define __NR_ia32_oldfstat 28 -#define __NR_ia32_pause 29 -#define __NR_ia32_utime 30 -#define __NR_ia32_stty 31 -#define __NR_ia32_gtty 32 -#define __NR_ia32_access 33 -#define __NR_ia32_nice 34 -#define __NR_ia32_ftime 35 -#define __NR_ia32_sync 36 -#define __NR_ia32_kill 37 -#define __NR_ia32_rename 38 -#define __NR_ia32_mkdir 39 -#define __NR_ia32_rmdir 40 -#define __NR_ia32_dup 41 -#define __NR_ia32_pipe 42 -#define __NR_ia32_times 43 -#define __NR_ia32_prof 44 -#define __NR_ia32_brk 45 -#define __NR_ia32_setgid 46 -#define __NR_ia32_getgid 47 -#define __NR_ia32_signal 48 -#define __NR_ia32_geteuid 49 -#define __NR_ia32_getegid 50 -#define __NR_ia32_acct 51 -#define __NR_ia32_umount2 52 -#define __NR_ia32_lock 53 -#define __NR_ia32_ioctl 54 -#define __NR_ia32_fcntl 55 -#define __NR_ia32_mpx 56 -#define __NR_ia32_setpgid 57 -#define __NR_ia32_ulimit 58 -#define __NR_ia32_oldolduname 59 -#define __NR_ia32_umask 60 -#define __NR_ia32_chroot 61 -#define __NR_ia32_ustat 62 -#define __NR_ia32_dup2 63 -#define __NR_ia32_getppid 64 -#define __NR_ia32_getpgrp 65 -#define __NR_ia32_setsid 66 -#define __NR_ia32_sigaction 67 -#define __NR_ia32_sgetmask 68 -#define __NR_ia32_ssetmask 69 -#define __NR_ia32_setreuid 70 -#define __NR_ia32_setregid 71 -#define __NR_ia32_sigsuspend 72 -#define __NR_ia32_sigpending 73 -#define __NR_ia32_sethostname 74 -#define __NR_ia32_setrlimit 75 -#define __NR_ia32_getrlimit 76 /* Back compatible 2Gig limited rlimit */ -#define __NR_ia32_getrusage 77 -#define __NR_ia32_gettimeofday 78 -#define __NR_ia32_settimeofday 79 -#define __NR_ia32_getgroups 80 -#define __NR_ia32_setgroups 81 -#define __NR_ia32_select 82 -#define __NR_ia32_symlink 83 -#define __NR_ia32_oldlstat 84 -#define __NR_ia32_readlink 85 -#define __NR_ia32_uselib 86 -#define __NR_ia32_swapon 87 -#define __NR_ia32_reboot 88 -#define __NR_ia32_readdir 89 -#define __NR_ia32_mmap 90 -#define __NR_ia32_munmap 91 -#define __NR_ia32_truncate 92 -#define __NR_ia32_ftruncate 93 -#define __NR_ia32_fchmod 94 -#define __NR_ia32_fchown 95 -#define __NR_ia32_getpriority 96 -#define __NR_ia32_setpriority 97 -#define __NR_ia32_profil 98 -#define __NR_ia32_statfs 99 -#define __NR_ia32_fstatfs 100 -#define __NR_ia32_ioperm 101 -#define __NR_ia32_socketcall 102 -#define __NR_ia32_syslog 103 -#define __NR_ia32_setitimer 104 -#define __NR_ia32_getitimer 105 -#define __NR_ia32_stat 106 -#define __NR_ia32_lstat 107 -#define __NR_ia32_fstat 108 -#define __NR_ia32_olduname 109 -#define __NR_ia32_iopl 110 -#define __NR_ia32_vhangup 111 -#define __NR_ia32_idle 112 -#define __NR_ia32_vm86old 113 -#define __NR_ia32_wait4 114 -#define __NR_ia32_swapoff 115 -#define __NR_ia32_sysinfo 116 -#define __NR_ia32_ipc 117 -#define __NR_ia32_fsync 118 -#define 
__NR_ia32_sigreturn 119 -#define __NR_ia32_clone 120 -#define __NR_ia32_setdomainname 121 -#define __NR_ia32_uname 122 -#define __NR_ia32_modify_ldt 123 -#define __NR_ia32_adjtimex 124 -#define __NR_ia32_mprotect 125 -#define __NR_ia32_sigprocmask 126 -#define __NR_ia32_create_module 127 -#define __NR_ia32_init_module 128 -#define __NR_ia32_delete_module 129 -#define __NR_ia32_get_kernel_syms 130 -#define __NR_ia32_quotactl 131 -#define __NR_ia32_getpgid 132 -#define __NR_ia32_fchdir 133 -#define __NR_ia32_bdflush 134 -#define __NR_ia32_sysfs 135 -#define __NR_ia32_personality 136 -#define __NR_ia32_afs_syscall 137 /* Syscall for Andrew File System */ -#define __NR_ia32_setfsuid 138 -#define __NR_ia32_setfsgid 139 -#define __NR_ia32__llseek 140 -#define __NR_ia32_getdents 141 -#define __NR_ia32__newselect 142 -#define __NR_ia32_flock 143 -#define __NR_ia32_msync 144 -#define __NR_ia32_readv 145 -#define __NR_ia32_writev 146 -#define __NR_ia32_getsid 147 -#define __NR_ia32_fdatasync 148 -#define __NR_ia32__sysctl 149 -#define __NR_ia32_mlock 150 -#define __NR_ia32_munlock 151 -#define __NR_ia32_mlockall 152 -#define __NR_ia32_munlockall 153 -#define __NR_ia32_sched_setparam 154 -#define __NR_ia32_sched_getparam 155 -#define __NR_ia32_sched_setscheduler 156 -#define __NR_ia32_sched_getscheduler 157 -#define __NR_ia32_sched_yield 158 -#define __NR_ia32_sched_get_priority_max 159 -#define __NR_ia32_sched_get_priority_min 160 -#define __NR_ia32_sched_rr_get_interval 161 -#define __NR_ia32_nanosleep 162 -#define __NR_ia32_mremap 163 -#define __NR_ia32_setresuid 164 -#define __NR_ia32_getresuid 165 -#define __NR_ia32_vm86 166 -#define __NR_ia32_query_module 167 -#define __NR_ia32_poll 168 -#define __NR_ia32_nfsservctl 169 -#define __NR_ia32_setresgid 170 -#define __NR_ia32_getresgid 171 -#define __NR_ia32_prctl 172 +#define __NR_ia32_sigreturn 119 #define __NR_ia32_rt_sigreturn 173 -#define __NR_ia32_rt_sigaction 174 -#define __NR_ia32_rt_sigprocmask 175 -#define __NR_ia32_rt_sigpending 176 -#define __NR_ia32_rt_sigtimedwait 177 -#define __NR_ia32_rt_sigqueueinfo 178 -#define __NR_ia32_rt_sigsuspend 179 -#define __NR_ia32_pread 180 -#define __NR_ia32_pwrite 181 -#define __NR_ia32_chown 182 -#define __NR_ia32_getcwd 183 -#define __NR_ia32_capget 184 -#define __NR_ia32_capset 185 -#define __NR_ia32_sigaltstack 186 -#define __NR_ia32_sendfile 187 -#define __NR_ia32_getpmsg 188 /* some people actually want streams */ -#define __NR_ia32_putpmsg 189 /* some people actually want streams */ -#define __NR_ia32_vfork 190 -#define __NR_ia32_ugetrlimit 191 /* SuS compliant getrlimit */ -#define __NR_ia32_mmap2 192 -#define __NR_ia32_truncate64 193 -#define __NR_ia32_ftruncate64 194 -#define __NR_ia32_stat64 195 -#define __NR_ia32_lstat64 196 -#define __NR_ia32_fstat64 197 -#define __NR_ia32_lchown32 198 -#define __NR_ia32_getuid32 199 -#define __NR_ia32_getgid32 200 -#define __NR_ia32_geteuid32 201 -#define __NR_ia32_getegid32 202 -#define __NR_ia32_setreuid32 203 -#define __NR_ia32_setregid32 204 -#define __NR_ia32_getgroups32 205 -#define __NR_ia32_setgroups32 206 -#define __NR_ia32_fchown32 207 -#define __NR_ia32_setresuid32 208 -#define __NR_ia32_getresuid32 209 -#define __NR_ia32_setresgid32 210 -#define __NR_ia32_getresgid32 211 -#define __NR_ia32_chown32 212 -#define __NR_ia32_setuid32 213 -#define __NR_ia32_setgid32 214 -#define __NR_ia32_setfsuid32 215 -#define __NR_ia32_setfsgid32 216 -#define __NR_ia32_pivot_root 217 -#define __NR_ia32_mincore 218 -#define __NR_ia32_madvise 219 -#define 
__NR_ia32_madvise1 219 /* delete when C lib stub is removed */ -#define __NR_ia32_getdents64 220 -#define __NR_ia32_fcntl64 221 -#define __NR_ia32_tuxcall 222 -#define __NR_ia32_security 223 -#define __NR_ia32_gettid 224 -#define __NR_ia32_readahead 225 -#define __NR_ia32_setxattr 226 -#define __NR_ia32_lsetxattr 227 -#define __NR_ia32_fsetxattr 228 -#define __NR_ia32_getxattr 229 -#define __NR_ia32_lgetxattr 230 -#define __NR_ia32_fgetxattr 231 -#define __NR_ia32_listxattr 232 -#define __NR_ia32_llistxattr 233 -#define __NR_ia32_flistxattr 234 -#define __NR_ia32_removexattr 235 -#define __NR_ia32_lremovexattr 236 -#define __NR_ia32_fremovexattr 237 -#define __NR_ia32_tkill 238 -#define __NR_ia32_sendfile64 239 -#define __NR_ia32_futex 240 -#define __NR_ia32_sched_setaffinity 241 -#define __NR_ia32_sched_getaffinity 242 -#define __NR_ia32_set_thread_area 243 -#define __NR_ia32_get_thread_area 244 -#define __NR_ia32_io_setup 245 -#define __NR_ia32_io_destroy 246 -#define __NR_ia32_io_getevents 247 -#define __NR_ia32_io_submit 248 -#define __NR_ia32_io_cancel 249 -#define __NR_ia32_exit_group 252 -#define __NR_ia32_lookup_dcookie 253 -#define __NR_ia32_sys_epoll_create 254 -#define __NR_ia32_sys_epoll_ctl 255 -#define __NR_ia32_sys_epoll_wait 256 -#define __NR_ia32_remap_file_pages 257 -#define __NR_ia32_set_tid_address 258 -#define __NR_ia32_timer_create 259 -#define __NR_ia32_timer_settime (__NR_ia32_timer_create+1) -#define __NR_ia32_timer_gettime (__NR_ia32_timer_create+2) -#define __NR_ia32_timer_getoverrun (__NR_ia32_timer_create+3) -#define __NR_ia32_timer_delete (__NR_ia32_timer_create+4) -#define __NR_ia32_clock_settime (__NR_ia32_timer_create+5) -#define __NR_ia32_clock_gettime (__NR_ia32_timer_create+6) -#define __NR_ia32_clock_getres (__NR_ia32_timer_create+7) -#define __NR_ia32_clock_nanosleep (__NR_ia32_timer_create+8) -#define __NR_ia32_statfs64 268 -#define __NR_ia32_fstatfs64 269 -#define __NR_ia32_tgkill 270 -#define __NR_ia32_utimes 271 -#define __NR_ia32_fadvise64_64 272 -#define __NR_ia32_vserver 273 -#define __NR_ia32_mbind 274 -#define __NR_ia32_get_mempolicy 275 -#define __NR_ia32_set_mempolicy 276 -#define __NR_ia32_mq_open 277 -#define __NR_ia32_mq_unlink (__NR_ia32_mq_open+1) -#define __NR_ia32_mq_timedsend (__NR_ia32_mq_open+2) -#define __NR_ia32_mq_timedreceive (__NR_ia32_mq_open+3) -#define __NR_ia32_mq_notify (__NR_ia32_mq_open+4) -#define __NR_ia32_mq_getsetattr (__NR_ia32_mq_open+5) -#define __NR_ia32_kexec 283 -#define __NR_ia32_waitid 284 -/* #define __NR_sys_setaltroot 285 */ -#define __NR_ia32_add_key 286 -#define __NR_ia32_request_key 287 -#define __NR_ia32_keyctl 288 -#define __NR_ia32_ioprio_set 289 -#define __NR_ia32_ioprio_get 290 -#define __NR_ia32_inotify_init 291 -#define __NR_ia32_inotify_add_watch 292 -#define __NR_ia32_inotify_rm_watch 293 -#define __NR_ia32_migrate_pages 294 -#define __NR_ia32_openat 295 -#define __NR_ia32_mkdirat 296 -#define __NR_ia32_mknodat 297 -#define __NR_ia32_fchownat 298 -#define __NR_ia32_futimesat 299 -#define __NR_ia32_fstatat64 300 -#define __NR_ia32_unlinkat 301 -#define __NR_ia32_renameat 302 -#define __NR_ia32_linkat 303 -#define __NR_ia32_symlinkat 304 -#define __NR_ia32_readlinkat 305 -#define __NR_ia32_fchmodat 306 -#define __NR_ia32_faccessat 307 -#define __NR_ia32_pselect6 308 -#define __NR_ia32_ppoll 309 -#define __NR_ia32_unshare 310 #endif /* _ASM_X86_64_IA32_UNISTD_H_ */ diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h new file mode 100644 index 
00000000000..59c39643156 --- /dev/null +++ b/include/asm-x86_64/intel_arch_perfmon.h @@ -0,0 +1,19 @@ +#ifndef X86_64_INTEL_ARCH_PERFMON_H +#define X86_64_INTEL_ARCH_PERFMON_H 1 + +#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 +#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 + +#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 +#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 + +#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) +#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) +#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) +#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) + +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0) + +#endif /* X86_64_INTEL_ARCH_PERFMON_H */ diff --git a/include/asm-x86_64/k8.h b/include/asm-x86_64/k8.h new file mode 100644 index 00000000000..699dd6961ed --- /dev/null +++ b/include/asm-x86_64/k8.h @@ -0,0 +1,14 @@ +#ifndef _ASM_K8_H +#define _ASM_K8_H 1 + +#include <linux/pci.h> + +extern struct pci_device_id k8_nb_ids[]; + +extern int early_is_k8_nb(u32 value); +extern struct pci_dev **k8_northbridges; +extern int num_k8_northbridges; +extern int cache_k8_northbridges(void); +extern void k8_flush_garts(void); + +#endif diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h index cf795631d9b..cd52c7f33bc 100644 --- a/include/asm-x86_64/kdebug.h +++ b/include/asm-x86_64/kdebug.h @@ -15,6 +15,8 @@ struct die_args { extern int register_die_notifier(struct notifier_block *); extern int unregister_die_notifier(struct notifier_block *); +extern int register_page_fault_notifier(struct notifier_block *); +extern int unregister_page_fault_notifier(struct notifier_block *); extern struct atomic_notifier_head die_chain; /* Grossly misnamed. */ diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h index 98a1e95ddb9..d36febd9bb1 100644 --- a/include/asm-x86_64/kprobes.h +++ b/include/asm-x86_64/kprobes.h @@ -43,6 +43,7 @@ typedef u8 kprobe_opcode_t; #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES +#define ARCH_INACTIVE_KPROBE_COUNT 1 void kretprobe_trampoline(void); extern void arch_remove_kprobe(struct kprobe *p); diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h index cd17945bf21..e769e620022 100644 --- a/include/asm-x86_64/local.h +++ b/include/asm-x86_64/local.h @@ -59,12 +59,26 @@ static inline void local_sub(long i, local_t *v) * This could be done better if we moved the per cpu data directly * after GS. */ -#define cpu_local_read(v) local_read(&__get_cpu_var(v)) -#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) -#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) -#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) -#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) -#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) + +/* Need to disable preemption for the cpu local counters otherwise we could + still access a variable of a previous CPU in a non atomic way. 
*/ +#define cpu_local_wrap_v(v) \ + ({ local_t res__; \ + preempt_disable(); \ + res__ = (v); \ + preempt_enable(); \ + res__; }) +#define cpu_local_wrap(v) \ + ({ preempt_disable(); \ + v; \ + preempt_enable(); }) \ + +#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v))) +#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i))) +#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v))) +#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v))) +#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v))) +#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v))) #define __cpu_local_inc(v) cpu_local_inc(v) #define __cpu_local_dec(v) cpu_local_dec(v) diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h index 7229785094e..d13687dfd69 100644 --- a/include/asm-x86_64/mce.h +++ b/include/asm-x86_64/mce.h @@ -67,13 +67,22 @@ struct mce_log { /* Software defined banks */ #define MCE_EXTENDED_BANK 128 #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 -#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */ -#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4 + +#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */ +#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9) +#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9) +#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9) +#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9) +#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9) +#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) +#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) #ifdef __KERNEL__ #include <asm/atomic.h> void mce_log(struct mce *m); +DECLARE_PER_CPU(struct sys_device, device_mce); + #ifdef CONFIG_X86_MCE_INTEL void mce_intel_feature_init(struct cpuinfo_x86 *c); #else diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h index 11fbee2bd6c..06fab6de2a8 100644 --- a/include/asm-x86_64/mutex.h +++ b/include/asm-x86_64/mutex.h @@ -24,7 +24,7 @@ do { \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ - LOCK " decl (%%rdi) \n" \ + LOCK_PREFIX " decl (%%rdi) \n" \ " js 2f \n" \ "1: \n" \ \ @@ -74,7 +74,7 @@ do { \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ - LOCK " incl (%%rdi) \n" \ + LOCK_PREFIX " incl (%%rdi) \n" \ " jle 2f \n" \ "1: \n" \ \ diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h index d3abfc6a8fd..efb45c894d7 100644 --- a/include/asm-x86_64/nmi.h +++ b/include/asm-x86_64/nmi.h @@ -5,26 +5,27 @@ #define ASM_NMI_H #include <linux/pm.h> +#include <asm/io.h> struct pt_regs; - + typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); - -/** + +/** * set_nmi_callback * * Set a handler for an NMI. Only one handler may be * set. Return 1 if the NMI was handled. */ void set_nmi_callback(nmi_callback_t callback); - -/** + +/** * unset_nmi_callback * * Remove the handler previously set. */ void unset_nmi_callback(void); - + #ifdef CONFIG_PM /** Replace the PM callback routine for NMI. 
*/ @@ -56,4 +57,21 @@ extern int unknown_nmi_panic; extern int check_nmi_watchdog(void); +extern void setup_apic_nmi_watchdog (void); +extern int reserve_lapic_nmi(void); +extern void release_lapic_nmi(void); +extern void disable_timer_nmi_watchdog(void); +extern void enable_timer_nmi_watchdog(void); +extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); + +extern void nmi_watchdog_default(void); +extern int setup_nmi_watchdog(char *); + +extern unsigned int nmi_watchdog; +#define NMI_DEFAULT -1 +#define NMI_NONE 0 +#define NMI_IO_APIC 1 +#define NMI_LOCAL_APIC 2 +#define NMI_INVALID 3 + #endif /* ASM_NMI_H */ diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h index 2db0620d544..49c5e928059 100644 --- a/include/asm-x86_64/pci.h +++ b/include/asm-x86_64/pci.h @@ -39,8 +39,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); #include <asm/scatterlist.h> #include <linux/string.h> #include <asm/page.h> -#include <linux/dma-mapping.h> /* for have_iommu */ +extern void pci_iommu_alloc(void); extern int iommu_setup(char *opt); /* The PCI address space does equal the physical memory @@ -52,7 +52,7 @@ extern int iommu_setup(char *opt); */ #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) -#ifdef CONFIG_GART_IOMMU +#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU) /* * x86-64 always supports DAC, but sometimes it is useful to force diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 31e83c3bd02..a31ab4e68a9 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -337,14 +337,8 @@ static inline int pmd_large(pmd_t pte) { /* to find an entry in a page-table-directory. */ #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address)) -#define pud_offset_k(pgd, addr) pud_offset(pgd, addr) #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) -static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address) -{ - return pud + pud_index(address); -} - /* PMD - Level 2 access */ #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index 3061a38a3b1..3b3c1217fe6 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h @@ -69,7 +69,11 @@ struct cpuinfo_x86 { cpumask_t llc_shared_map; /* cpus sharing the last level cache */ #endif __u8 apicid; +#ifdef CONFIG_SMP __u8 booted_cores; /* number of cores as seen by OS */ + __u8 phys_proc_id; /* Physical Processor id. */ + __u8 cpu_core_id; /* Core id. 
*/ +#endif } ____cacheline_aligned; #define X86_VENDOR_INTEL 0 @@ -96,6 +100,7 @@ extern char ignore_irq13; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); +extern unsigned short num_cache_leaves; /* * EFLAGS bits diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 8abf2a43c94..038fe1f47e6 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -37,7 +37,6 @@ extern void ia32_sysenter_target(void); extern void config_acpi_tables(void); extern void ia32_syscall(void); -extern void iommu_hole_init(void); extern int pmtimer_mark_offset(void); extern void pmtimer_resume(void); @@ -75,7 +74,7 @@ extern void main_timer_handler(struct pt_regs *regs); extern unsigned long end_pfn_map; -extern void show_trace(unsigned long * rsp); +extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp); extern void show_registers(struct pt_regs *regs); extern void exception_table_check(void); @@ -101,13 +100,9 @@ extern int unsynchronized_tsc(void); extern void select_idle_routine(const struct cpuinfo_x86 *c); -extern void gart_parse_options(char *); -extern void __init no_iommu_init(void); - extern unsigned long table_start, table_end; extern int exception_trace; -extern int force_iommu, no_iommu; extern int using_apic_timer; extern int disable_apic; extern unsigned cpu_khz; @@ -116,7 +111,13 @@ extern int skip_ioapic_setup; extern int acpi_ht; extern int acpi_disabled; -#ifdef CONFIG_GART_IOMMU +extern void no_iommu_init(void); +extern int force_iommu, no_iommu; +extern int iommu_detected; +#ifdef CONFIG_IOMMU +extern void gart_iommu_init(void); +extern void gart_parse_options(char *); +extern void iommu_hole_init(void); extern int fallback_aper_order; extern int fallback_aper_force; extern int iommu_aperture; diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h index 9942cc39306..dea0e945926 100644 --- a/include/asm-x86_64/rwlock.h +++ b/include/asm-x86_64/rwlock.h @@ -24,7 +24,7 @@ #define RW_LOCK_BIAS_STR "0x01000000" #define __build_read_lock_ptr(rw, helper) \ - asm volatile(LOCK "subl $1,(%0)\n\t" \ + asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \ "js 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ @@ -34,7 +34,7 @@ ::"a" (rw) : "memory") #define __build_read_lock_const(rw, helper) \ - asm volatile(LOCK "subl $1,%0\n\t" \ + asm volatile(LOCK_PREFIX "subl $1,%0\n\t" \ "js 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ @@ -54,7 +54,7 @@ } while (0) #define __build_write_lock_ptr(rw, helper) \ - asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ + asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ "jnz 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ @@ -64,7 +64,7 @@ ::"a" (rw) : "memory") #define __build_write_lock_const(rw, helper) \ - asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ + asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ "jnz 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h index a389aa6fe80..064df08b9a0 100644 --- a/include/asm-x86_64/semaphore.h +++ b/include/asm-x86_64/semaphore.h @@ -106,7 +106,7 @@ static inline void down(struct semaphore * sem) __asm__ __volatile__( "# atomic down operation\n\t" - LOCK "decl %0\n\t" /* --sem->count */ + LOCK_PREFIX "decl %0\n\t" /* --sem->count */ "js 2f\n" "1:\n" LOCK_SECTION_START("") @@ -130,7 +130,7 @@ static inline int down_interruptible(struct semaphore * 
sem) __asm__ __volatile__( "# atomic interruptible down operation\n\t" - LOCK "decl %1\n\t" /* --sem->count */ + LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" @@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem) __asm__ __volatile__( "# atomic interruptible down operation\n\t" - LOCK "decl %1\n\t" /* --sem->count */ + LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" @@ -178,7 +178,7 @@ static inline void up(struct semaphore * sem) { __asm__ __volatile__( "# atomic up operation\n\t" - LOCK "incl %0\n\t" /* ++sem->count */ + LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ "jle 2f\n" "1:\n" LOCK_SECTION_START("") diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index 7686b9b25ae..6805e1feb30 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -53,8 +53,6 @@ extern int smp_call_function_single(int cpuid, void (*func) (void *info), extern cpumask_t cpu_sibling_map[NR_CPUS]; extern cpumask_t cpu_core_map[NR_CPUS]; -extern u8 phys_proc_id[NR_CPUS]; -extern u8 cpu_core_id[NR_CPUS]; extern u8 cpu_llc_id[NR_CPUS]; #define SMP_TRAMPOLINE_BASE 0x6000 diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 5d8a5e3589f..8d3421996f9 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h @@ -31,15 +31,19 @@ "jmp 1b\n" \ LOCK_SECTION_END +#define __raw_spin_lock_string_up \ + "\n\tdecl %0" + #define __raw_spin_unlock_string \ "movl $1,%0" \ :"=m" (lock->slock) : : "memory" static inline void __raw_spin_lock(raw_spinlock_t *lock) { - __asm__ __volatile__( - __raw_spin_lock_string - :"=m" (lock->slock) : : "memory"); + alternative_smp( + __raw_spin_lock_string, + __raw_spin_lock_string_up, + "=m" (lock->slock) : : "memory"); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h index ee6bf275349..9505d9f4bea 100644 --- a/include/asm-x86_64/string.h +++ b/include/asm-x86_64/string.h @@ -6,7 +6,8 @@ /* Written 2002 by Andi Kleen */ /* Only used for special circumstances. 
Stolen from i386/string.h */ -static inline void * __inline_memcpy(void * to, const void * from, size_t n) +static __always_inline void * +__inline_memcpy(void * to, const void * from, size_t n) { unsigned long d0, d1, d2; __asm__ __volatile__( diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index f48e0dad8b3..68e559f3631 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -3,15 +3,10 @@ #include <linux/kernel.h> #include <asm/segment.h> +#include <asm/alternative.h> #ifdef __KERNEL__ -#ifdef CONFIG_SMP -#define LOCK_PREFIX "lock ; " -#else -#define LOCK_PREFIX "" -#endif - #define __STR(x) #x #define STR(x) __STR(x) @@ -34,7 +29,7 @@ "thread_return:\n\t" \ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ - LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ + LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ "movq %%rax,%%rdi\n\t" \ "jc ret_from_fork\n\t" \ RESTORE_CONTEXT \ @@ -69,82 +64,6 @@ extern void load_gs_index(unsigned); ".previous" \ : :"r" (value), "r" (0)) -#ifdef __KERNEL__ -struct alt_instr { - __u8 *instr; /* original instruction */ - __u8 *replacement; - __u8 cpuid; /* cpuid bit set for replacement */ - __u8 instrlen; /* length of original instruction */ - __u8 replacementlen; /* length of new instruction, <= instrlen */ - __u8 pad[5]; -}; -#endif - -/* - * Alternative instructions for different CPU types or capabilities. - * - * This allows to use optimized instructions even on generic binary - * kernels. - * - * length of oldinstr must be longer or equal the length of newinstr - * It can be padded with nops as needed. - * - * For non barrier like inlines please define new variants - * without volatile and memory clobber. - */ -#define alternative(oldinstr, newinstr, feature) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" :: "i" (feature) : "memory") - -/* - * Alternative inline assembly with input. - * - * Peculiarities: - * No memory clobber here. - * Argument numbers start with 1. - * Best is to use constraints that are fixed size (like (%1) ... "r") - * If you use variable sized constraints like "m" or "g" in the - * replacement make sure to pad to the worst case length. - */ -#define alternative_input(oldinstr, newinstr, feature, input...) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" :: "i" (feature), ##input) - -/* Like alternative_input, but with a single output argument */ -#define alternative_io(oldinstr, newinstr, feature, output, input...) 
\ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte %c[feat]\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" : output : [feat] "i" (feature), ##input) - /* * Clear and set 'TS' bit respectively */ @@ -366,5 +285,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, void cpu_idle_wait(void); extern unsigned long arch_align_stack(unsigned long sp); +extern void free_init_pages(char *what, unsigned long begin, unsigned long end); #endif diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h new file mode 100644 index 00000000000..ee51d31528d --- /dev/null +++ b/include/asm-x86_64/tce.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation + * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation + * + * This file is derived from asm-powerpc/tce.h. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_X86_64_TCE_H +#define _ASM_X86_64_TCE_H + +extern void* tce_table_kva[]; +extern unsigned int specified_table_size; +struct iommu_table; + +#define TCE_ENTRY_SIZE 8 /* in bytes */ + +#define TCE_READ_SHIFT 0 +#define TCE_WRITE_SHIFT 1 +#define TCE_HUBID_SHIFT 2 /* unused */ +#define TCE_RSVD_SHIFT 8 /* unused */ +#define TCE_RPN_SHIFT 12 +#define TCE_UNUSED_SHIFT 48 /* unused */ + +#define TCE_RPN_MASK 0x0000fffffffff000ULL + +extern void tce_build(struct iommu_table *tbl, unsigned long index, + unsigned int npages, unsigned long uaddr, int direction); +extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); +extern void* alloc_tce_table(void); +extern void free_tce_table(void *tbl); +extern int build_tce_table(struct pci_dev *dev, void __iomem *bbar); + +#endif /* _ASM_X86_64_TCE_H */ diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h index 4ac0e0a3693..2029b00351f 100644 --- a/include/asm-x86_64/thread_info.h +++ b/include/asm-x86_64/thread_info.h @@ -73,8 +73,21 @@ static inline struct thread_info *stack_thread_info(void) } /* thread information allocation */ +#ifdef CONFIG_DEBUG_STACK_USAGE +#define alloc_thread_info(tsk) \ + ({ \ + struct thread_info *ret; \ + \ + ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \ + if (ret) \ + memset(ret, 0, THREAD_SIZE); \ + ret; \ + }) +#else #define alloc_thread_info(tsk) \ ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) +#endif + #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) #else /* !__ASSEMBLY__ */ @@ -101,7 +114,7 @@ static 
inline struct thread_info *stack_thread_info(void) #define TIF_IRET 5 /* force IRET */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ -#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ +/* 16 free */ #define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ #define TIF_ABI_PENDING 19 @@ -115,7 +128,6 @@ static inline struct thread_info *stack_thread_info(void) #define _TIF_IRET (1<<TIF_IRET) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) -#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_IA32 (1<<TIF_IA32) #define _TIF_FORK (1<<TIF_FORK) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) @@ -137,6 +149,9 @@ static inline struct thread_info *stack_thread_info(void) */ #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ #define TS_COMPAT 0x0002 /* 32bit syscall active */ +#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */ + +#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING) #endif /* __KERNEL__ */ diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index 80c4e44d011..6e7a2e976b0 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h @@ -7,8 +7,6 @@ #include <asm/mpspec.h> #include <asm/bitops.h> -/* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */ - extern cpumask_t cpu_online_map; extern unsigned char cpu_to_node[]; @@ -57,12 +55,12 @@ extern int __node_distance(int, int); #endif #ifdef CONFIG_SMP -#define topology_physical_package_id(cpu) \ - (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) -#define topology_core_id(cpu) \ - (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu]) +#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) +#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) +#define mc_capable() (boot_cpu_data.x86_max_cores > 1) +#define smt_capable() (smp_num_siblings > 1) #endif #include <asm-generic/topology.h> diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h new file mode 100644 index 00000000000..f3e7124effe --- /dev/null +++ b/include/asm-x86_64/unwind.h @@ -0,0 +1,106 @@ +#ifndef _ASM_X86_64_UNWIND_H +#define _ASM_X86_64_UNWIND_H + +/* + * Copyright (C) 2002-2006 Novell, Inc. + * Jan Beulich <jbeulich@novell.com> + * This code is released under version 2 of the GNU GPL. 
+ */ + +#ifdef CONFIG_STACK_UNWIND + +#include <linux/sched.h> +#include <asm/ptrace.h> +#include <asm/uaccess.h> +#include <asm/vsyscall.h> + +struct unwind_frame_info +{ + struct pt_regs regs; + struct task_struct *task; +}; + +#define UNW_PC(frame) (frame)->regs.rip +#define UNW_SP(frame) (frame)->regs.rsp +#ifdef CONFIG_FRAME_POINTER +#define UNW_FP(frame) (frame)->regs.rbp +#define FRAME_RETADDR_OFFSET 8 +#define FRAME_LINK_OFFSET 0 +#define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1)) +#define STACK_TOP(tsk) ((tsk)->thread.rsp0) +#endif +/* Might need to account for the special exception and interrupt handling + stacks here, since normally + EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER, + but the construct is needed only for getting across the stack switch to + the interrupt stack - thus considering the IRQ stack itself is unnecessary, + and the overhead of comparing against all exception handling stacks seems + not desirable. */ +#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) + +#define UNW_REGISTER_INFO \ + PTREGS_INFO(rax), \ + PTREGS_INFO(rdx), \ + PTREGS_INFO(rcx), \ + PTREGS_INFO(rbx), \ + PTREGS_INFO(rsi), \ + PTREGS_INFO(rdi), \ + PTREGS_INFO(rbp), \ + PTREGS_INFO(rsp), \ + PTREGS_INFO(r8), \ + PTREGS_INFO(r9), \ + PTREGS_INFO(r10), \ + PTREGS_INFO(r11), \ + PTREGS_INFO(r12), \ + PTREGS_INFO(r13), \ + PTREGS_INFO(r14), \ + PTREGS_INFO(r15), \ + PTREGS_INFO(rip) + +static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, + /*const*/ struct pt_regs *regs) +{ + info->regs = *regs; +} + +static inline void arch_unw_init_blocked(struct unwind_frame_info *info) +{ + extern const char thread_return[]; + + memset(&info->regs, 0, sizeof(info->regs)); + info->regs.rip = (unsigned long)thread_return; + info->regs.cs = __KERNEL_CS; + __get_user(info->regs.rbp, (unsigned long *)info->task->thread.rsp); + info->regs.rsp = info->task->thread.rsp; + info->regs.ss = __KERNEL_DS; +} + +extern int arch_unwind_init_running(struct unwind_frame_info *, + int (*callback)(struct unwind_frame_info *, + void *arg), + void *arg); + +static inline int arch_unw_user_mode(const struct unwind_frame_info *info) +{ +#if 0 /* This can only work when selector register saves/restores + are properly annotated (and tracked in UNW_REGISTER_INFO). 
*/ + return user_mode(&info->regs); +#else + return (long)info->regs.rip >= 0 + || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END) + || (long)info->regs.rsp >= 0; +#endif +} + +#else + +#define UNW_PC(frame) ((void)(frame), 0) + +static inline int arch_unw_user_mode(const void *info) +{ + return 0; +} + +#endif + +#endif /* _ASM_X86_64_UNWIND_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index a3dae1803f4..c37c34275a4 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -37,6 +37,7 @@ extern struct key_type key_type_user; extern int user_instantiate(struct key *key, const void *data, size_t datalen); extern int user_update(struct key *key, const void *data, size_t datalen); extern int user_match(const struct key *key, const void *criterion); +extern void user_revoke(struct key *key); extern void user_destroy(struct key *key); extern void user_describe(const struct key *user, struct seq_file *m); extern long user_read(const struct key *key, diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 90d6df1551e..88b5dfd8ee1 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -528,12 +528,18 @@ static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; } #ifdef CONFIG_ACPI_NUMA int acpi_get_pxm(acpi_handle handle); +int acpi_get_node(acpi_handle *handle); #else static inline int acpi_get_pxm(acpi_handle handle) { return 0; } +static inline int acpi_get_node(acpi_handle *handle) +{ + return 0; +} #endif +extern int acpi_paddr_to_node(u64 start_addr, u64 size); extern int pnpacpi_disabled; diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index d9ed2796985..dcc5de7cc48 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -24,6 +24,9 @@ * The available bitmap operations and their rough meaning in the * case that the bitmap is a single unsigned long are thus: * + * Note that nbits should be always a compile time evaluable constant. + * Otherwise many inlines will generate horrible code. 
+ * * bitmap_zero(dst, nbits) *dst = 0UL * bitmap_fill(dst, nbits) *dst = ~0UL * bitmap_copy(dst, src, nbits) *dst = *src @@ -244,6 +247,8 @@ static inline int bitmap_full(const unsigned long *src, int nbits) static inline int bitmap_weight(const unsigned long *src, int nbits) { + if (nbits <= BITS_PER_LONG) + return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return __bitmap_weight(src, nbits); } diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index fb7e9b7ccbe..737e407d0cd 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -149,7 +149,6 @@ void create_empty_buffers(struct page *, unsigned long, unsigned long b_state); void end_buffer_read_sync(struct buffer_head *bh, int uptodate); void end_buffer_write_sync(struct buffer_head *bh, int uptodate); -void end_buffer_async_write(struct buffer_head *bh, int uptodate); /* Things to do with buffers at mapping->private_list */ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); @@ -214,6 +213,7 @@ int nobh_truncate_page(struct address_space *, loff_t); int nobh_writepage(struct page *page, get_block_t *get_block, struct writeback_control *wbc); +void buffer_init(void); /* * inline definitions diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h new file mode 100644 index 00000000000..d852024ed09 --- /dev/null +++ b/include/linux/clocksource.h @@ -0,0 +1,185 @@ +/* linux/include/linux/clocksource.h + * + * This file contains the structure definitions for clocksources. + * + * If you are not a clocksource, or timekeeping code, you should + * not be including this file! + */ +#ifndef _LINUX_CLOCKSOURCE_H +#define _LINUX_CLOCKSOURCE_H + +#include <linux/types.h> +#include <linux/timex.h> +#include <linux/time.h> +#include <linux/list.h> +#include <asm/div64.h> +#include <asm/io.h> + +/* clocksource cycle base type */ +typedef u64 cycle_t; + +/** + * struct clocksource - hardware abstraction for a free running counter + * Provides mostly state-free accessors to the underlying hardware. + * + * @name: ptr to clocksource name + * @list: list head for registration + * @rating: rating value for selection (higher is better) + * To avoid rating inflation the following + * list should give you a guide as to how + * to assign your clocksource a rating + * 1-99: Unfit for real use + * Only available for bootup and testing purposes. + * 100-199: Base level usability. + * Functional for real use, but not desired. + * 200-299: Good. + * A correct and usable clocksource. + * 300-399: Desired. + * A reasonably fast and accurate clocksource. + * 400-499: Perfect + * The ideal clocksource. A must-use where + * available. + * @read: returns a cycle value + * @mask: bitmask for two's complement + * subtraction of non 64 bit counters + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) + * @update_callback: called when safe to alter clocksource values + * @is_continuous: defines if clocksource is free-running. + * @cycle_interval: Used internally by timekeeping core, please ignore. + * @xtime_interval: Used internally by timekeeping core, please ignore. 
+ */ +struct clocksource { + char *name; + struct list_head list; + int rating; + cycle_t (*read)(void); + cycle_t mask; + u32 mult; + u32 shift; + int (*update_callback)(void); + int is_continuous; + + /* timekeeping specific data, ignore */ + cycle_t cycle_last, cycle_interval; + u64 xtime_nsec, xtime_interval; + s64 error; +}; + +/* simplify initialization of mask field */ +#define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1) + +/** + * clocksource_khz2mult - calculates mult from khz and shift + * @khz: Clocksource frequency in KHz + * @shift_constant: Clocksource shift factor + * + * Helper function that converts a khz counter frequency to a clocksource + * multiplier, given the clocksource shift value + */ +static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant) +{ + /* khz = cyc/(Million ns) + * mult/2^shift = ns/cyc + * mult = ns/cyc * 2^shift + * mult = 1Million/khz * 2^shift + * mult = 1000000 * 2^shift / khz + * mult = (1000000<<shift) / khz + */ + u64 tmp = ((u64)1000000) << shift_constant; + + tmp += khz/2; /* round for do_div */ + do_div(tmp, khz); + + return (u32)tmp; +} + +/** + * clocksource_hz2mult - calculates mult from hz and shift + * @hz: Clocksource frequency in Hz + * @shift_constant: Clocksource shift factor + * + * Helper function that converts a hz counter + * frequency to a clocksource multiplier, given the + * clocksource shift value + */ +static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) +{ + /* hz = cyc/(Billion ns) + * mult/2^shift = ns/cyc + * mult = ns/cyc * 2^shift + * mult = 1Billion/hz * 2^shift + * mult = 1000000000 * 2^shift / hz + * mult = (1000000000<<shift) / hz + */ + u64 tmp = ((u64)1000000000) << shift_constant; + + tmp += hz/2; /* round for do_div */ + do_div(tmp, hz); + + return (u32)tmp; +} + +/** + * clocksource_read - Access the clocksource's current cycle value + * @cs: pointer to clocksource being read + * + * Uses the clocksource to return the current cycle_t value + */ +static inline cycle_t clocksource_read(struct clocksource *cs) +{ + return cs->read(); +} + +/** + * cyc2ns - converts clocksource cycles to nanoseconds + * @cs: Pointer to clocksource + * @cycles: Cycles + * + * Uses the clocksource and ntp adjustment to convert cycle_ts to nanoseconds. + * + * XXX - This could use some mult_lxl_ll() asm optimization + */ +static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles) +{ + u64 ret = (u64)cycles; + ret = (ret * cs->mult) >> cs->shift; + return ret; +} + +/** + * clocksource_calculate_interval - Calculates a clocksource interval struct + * + * @c: Pointer to clocksource. + * @length_nsec: Desired interval length in nanoseconds. + * + * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment + * pair and interval request. + * + * Unless you're the timekeeping code, you should not be using this! 
+ */ +static inline void clocksource_calculate_interval(struct clocksource *c, + unsigned long length_nsec) +{ + u64 tmp; + + /* XXX - All of this could use a whole lot of optimization */ + tmp = length_nsec; + tmp <<= c->shift; + tmp += c->mult/2; + do_div(tmp, c->mult); + + c->cycle_interval = (cycle_t)tmp; + if (c->cycle_interval == 0) + c->cycle_interval = 1; + + c->xtime_interval = (u64)c->cycle_interval * c->mult; +} + + +/* used to install a new clocksource */ +int clocksource_register(struct clocksource*); +void clocksource_reselect(void); +struct clocksource* clocksource_get_next(void); + +#endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/compat.h b/include/linux/compat.h index dda1697ec75..9760753e662 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -226,5 +226,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs, asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); +extern int compat_printk(const char *fmt, ...); + #endif /* CONFIG_COMPAT */ #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h index 89ab677cb99..917d62e4148 100644 --- a/include/linux/compat_ioctl.h +++ b/include/linux/compat_ioctl.h @@ -673,6 +673,11 @@ COMPATIBLE_IOCTL(CAPI_SET_FLAGS) COMPATIBLE_IOCTL(CAPI_CLR_FLAGS) COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT) COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT) +/* Siemens Gigaset */ +COMPATIBLE_IOCTL(GIGASET_REDIR) +COMPATIBLE_IOCTL(GIGASET_CONFIG) +COMPATIBLE_IOCTL(GIGASET_BRKCHARS) +COMPATIBLE_IOCTL(GIGASET_VERSION) /* Misc. */ COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */ COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */ diff --git a/include/linux/console.h b/include/linux/console.h index d0f8a800949..3bdf2155e56 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -63,9 +63,11 @@ extern const struct consw vga_con; /* VGA text console */ extern const struct consw newport_con; /* SGI Newport console */ extern const struct consw prom_con; /* SPARC PROM console */ +int con_is_bound(const struct consw *csw); +int register_con_driver(const struct consw *csw, int first, int last); +int unregister_con_driver(const struct consw *csw); int take_over_console(const struct consw *sw, int first, int last, int deflt); void give_up_console(const struct consw *sw); - /* scroll */ #define SM_UP (1) #define SM_DOWN (2) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 08d50c53aab..a3caf6866ba 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -31,17 +31,23 @@ struct cpu { struct sys_device sysdev; }; -extern int register_cpu(struct cpu *, int, struct node *); +extern int register_cpu(struct cpu *cpu, int num); extern struct sys_device *get_cpu_sysdev(unsigned cpu); #ifdef CONFIG_HOTPLUG_CPU -extern void unregister_cpu(struct cpu *, struct node *); +extern void unregister_cpu(struct cpu *cpu); #endif struct notifier_block; #ifdef CONFIG_SMP /* Need to know about CPUs going up/down? 
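The cpu.h hunk continuing below adds register_hotcpu_notifier()/unregister_hotcpu_notifier(), which collapse to nothing when CPU hotplug is configured out. A minimal sketch of a callback that could be registered through them; the callback body and names are illustrative:

#include <linux/notifier.h>
#include <linux/cpu.h>

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
		/* e.g. set up per-CPU state for CPU (unsigned long)hcpu */
		break;
	case CPU_DEAD:
		/* e.g. tear that state down again */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

/* driver init:  register_hotcpu_notifier(&example_cpu_nb);
 * driver exit:  unregister_hotcpu_notifier(&example_cpu_nb); */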
*/ extern int register_cpu_notifier(struct notifier_block *nb); +#ifdef CONFIG_HOTPLUG_CPU extern void unregister_cpu_notifier(struct notifier_block *nb); +#else +static inline void unregister_cpu_notifier(struct notifier_block *nb) +{ +} +#endif extern int current_in_cpu_hotplug(void); int cpu_up(unsigned int cpu); @@ -73,6 +79,8 @@ extern int lock_cpu_hotplug_interruptible(void); { .notifier_call = fn, .priority = pri }; \ register_cpu_notifier(&fn##_nb); \ } +#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) +#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) int cpu_down(unsigned int cpu); #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) #else @@ -80,6 +88,8 @@ int cpu_down(unsigned int cpu); #define unlock_cpu_hotplug() do { } while (0) #define lock_cpu_hotplug_interruptible() 0 #define hotcpu_notifier(fn, pri) +#define register_hotcpu_notifier(nb) +#define unregister_hotcpu_notifier(nb) /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */ static inline int cpu_is_offline(int cpu) { return 0; } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 5a0470e3611..7f946241b87 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -66,7 +66,7 @@ struct crypto_tfm; struct cipher_desc { struct crypto_tfm *tfm; - void (*crfn)(void *ctx, u8 *dst, const u8 *src); + void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, const u8 *src, unsigned int nbytes); void *info; @@ -79,10 +79,10 @@ struct cipher_desc { struct cipher_alg { unsigned int cia_min_keysize; unsigned int cia_max_keysize; - int (*cia_setkey)(void *ctx, const u8 *key, + int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen, u32 *flags); - void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src); - void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src); + void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, u8 *dst, const u8 *src, @@ -100,20 +100,19 @@ struct cipher_alg { struct digest_alg { unsigned int dia_digestsize; - void (*dia_init)(void *ctx); - void (*dia_update)(void *ctx, const u8 *data, unsigned int len); - void (*dia_final)(void *ctx, u8 *out); - int (*dia_setkey)(void *ctx, const u8 *key, + void (*dia_init)(struct crypto_tfm *tfm); + void (*dia_update)(struct crypto_tfm *tfm, const u8 *data, + unsigned int len); + void (*dia_final)(struct crypto_tfm *tfm, u8 *out); + int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen, u32 *flags); }; struct compress_alg { - int (*coa_init)(void *ctx); - void (*coa_exit)(void *ctx); - int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); - int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); + int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen); + int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen); }; #define cra_cipher cra_u.cipher @@ -129,14 +128,17 @@ struct crypto_alg { int cra_priority; - const char cra_name[CRYPTO_MAX_ALG_NAME]; - const char cra_driver_name[CRYPTO_MAX_ALG_NAME]; + char cra_name[CRYPTO_MAX_ALG_NAME]; + char cra_driver_name[CRYPTO_MAX_ALG_NAME]; union { struct cipher_alg cipher; struct digest_alg digest; struct compress_alg 
compress; } cra_u; + + int (*cra_init)(struct crypto_tfm *tfm); + void (*cra_exit)(struct crypto_tfm *tfm); struct module *cra_module; }; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index aee10b2ea4c..e3d1c33d155 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -8,9 +8,12 @@ #ifndef _LINUX_DEVICE_MAPPER_H #define _LINUX_DEVICE_MAPPER_H +#ifdef __KERNEL__ + struct dm_target; struct dm_table; struct dm_dev; +struct mapped_device; typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; @@ -78,7 +81,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d); struct target_type { const char *name; struct module *module; - unsigned version[3]; + unsigned version[3]; dm_ctr_fn ctr; dm_dtr_fn dtr; dm_map_fn map; @@ -128,4 +131,108 @@ struct dm_target { int dm_register_target(struct target_type *t); int dm_unregister_target(struct target_type *t); -#endif /* _LINUX_DEVICE_MAPPER_H */ + +/*----------------------------------------------------------------- + * Functions for creating and manipulating mapped devices. + * Drop the reference with dm_put when you finish with the object. + *---------------------------------------------------------------*/ + +/* + * DM_ANY_MINOR chooses the next available minor number. + */ +#define DM_ANY_MINOR (-1) +int dm_create(int minor, struct mapped_device **md); + +/* + * Reference counting for md. + */ +struct mapped_device *dm_get_md(dev_t dev); +void dm_get(struct mapped_device *md); +void dm_put(struct mapped_device *md); + +/* + * An arbitrary pointer may be stored alongside a mapped device. + */ +void dm_set_mdptr(struct mapped_device *md, void *ptr); +void *dm_get_mdptr(struct mapped_device *md); + +/* + * A device can still be used while suspended, but I/O is deferred. + */ +int dm_suspend(struct mapped_device *md, int with_lockfs); +int dm_resume(struct mapped_device *md); + +/* + * Event functions. + */ +uint32_t dm_get_event_nr(struct mapped_device *md); +int dm_wait_event(struct mapped_device *md, int event_nr); + +/* + * Info functions. + */ +const char *dm_device_name(struct mapped_device *md); +struct gendisk *dm_disk(struct mapped_device *md); +int dm_suspended(struct mapped_device *md); + +/* + * Geometry functions. + */ +int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); +int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); + + +/*----------------------------------------------------------------- + * Functions for manipulating device-mapper tables. + *---------------------------------------------------------------*/ + +/* + * First create an empty table. + */ +int dm_table_create(struct dm_table **result, int mode, + unsigned num_targets, struct mapped_device *md); + +/* + * Then call this once for each target. + */ +int dm_table_add_target(struct dm_table *t, const char *type, + sector_t start, sector_t len, char *params); + +/* + * Finally call this to make the table ready for use. + */ +int dm_table_complete(struct dm_table *t); + +/* + * Table reference counting. + */ +struct dm_table *dm_get_table(struct mapped_device *md); +void dm_table_get(struct dm_table *t); +void dm_table_put(struct dm_table *t); + +/* + * Queries + */ +sector_t dm_table_get_size(struct dm_table *t); +unsigned int dm_table_get_num_targets(struct dm_table *t); +int dm_table_get_mode(struct dm_table *t); +struct mapped_device *dm_table_get_md(struct dm_table *t); + +/* + * Trigger an event. 
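A hedged sketch of the call sequence the comments in this hunk describe (create a device, build and complete a table, then suspend/swap/resume); dm_swap_table() is declared just below. The "linear" target, its parameter string and the FMODE_* mode value are assumptions, and error cleanup is abbreviated:

static int example_dm_setup(sector_t len)
{
	struct mapped_device *md;
	struct dm_table *t;
	int r;

	/* 1. create the device, letting DM pick the minor */
	r = dm_create(DM_ANY_MINOR, &md);
	if (r)
		return r;

	/* 2. build the table: create, add each target, then complete */
	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
	if (!r)
		r = dm_table_add_target(t, "linear", 0, len, "/dev/sda 0");
	if (!r)
		r = dm_table_complete(t);

	/* 3. activate it: the device must be suspended around the swap */
	if (!r) {
		dm_suspend(md, 0);
		r = dm_swap_table(md, t);
		dm_resume(md);
	}

	dm_put(md);		/* drop our reference */
	return r;
}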
+ */ +void dm_table_event(struct dm_table *t); + +/* + * The device must be suspended before calling this method. + */ +int dm_swap_table(struct mapped_device *md, struct dm_table *t); + +/* + * Prepare a table for a device that will error all I/O. + * To make it active, call dm_suspend(), dm_swap_table() then dm_resume(). + */ +int dm_create_error_table(struct dm_table **result, struct mapped_device *md); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_DEVICE_MAPPER_H */ diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index c67c6786612..9623bb62509 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -285,9 +285,9 @@ typedef char ioctl_struct[308]; #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 6 +#define DM_VERSION_MINOR 7 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2006-02-17)" +#define DM_VERSION_EXTRA "-ioctl (2006-06-24)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ @@ -314,7 +314,7 @@ typedef char ioctl_struct[308]; #define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */ /* - * Set this to improve performance when you aren't going to use open_count. + * This flag is now ignored. */ #define DM_SKIP_BDGET_FLAG (1 << 9) /* In */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 78b236ca04f..272010a6078 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -20,7 +20,7 @@ */ #ifndef DMAENGINE_H #define DMAENGINE_H -#include <linux/config.h> + #ifdef CONFIG_DMA_ENGINE #include <linux/device.h> diff --git a/include/linux/fb.h b/include/linux/fb.h index f1281687e54..07a08e92bc7 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -504,23 +504,19 @@ struct fb_cursor_user { #define FB_EVENT_MODE_DELETE 0x04 /* A driver registered itself */ #define FB_EVENT_FB_REGISTERED 0x05 +/* A driver unregistered itself */ +#define FB_EVENT_FB_UNREGISTERED 0x06 /* CONSOLE-SPECIFIC: get console to framebuffer mapping */ -#define FB_EVENT_GET_CONSOLE_MAP 0x06 +#define FB_EVENT_GET_CONSOLE_MAP 0x07 /* CONSOLE-SPECIFIC: set console to framebuffer mapping */ -#define FB_EVENT_SET_CONSOLE_MAP 0x07 +#define FB_EVENT_SET_CONSOLE_MAP 0x08 /* A display blank is requested */ -#define FB_EVENT_BLANK 0x08 +#define FB_EVENT_BLANK 0x09 /* Private modelist is to be replaced */ -#define FB_EVENT_NEW_MODELIST 0x09 +#define FB_EVENT_NEW_MODELIST 0x0A /* The resolution of the passed in fb_info about to change and all vc's should be changed */ -#define FB_EVENT_MODE_CHANGE_ALL 0x0A -/* CONSOLE-SPECIFIC: set console rotation */ -#define FB_EVENT_SET_CON_ROTATE 0x0B -/* CONSOLE-SPECIFIC: get console rotation */ -#define FB_EVENT_GET_CON_ROTATE 0x0C -/* CONSOLE-SPECIFIC: rotate all consoles */ -#define FB_EVENT_SET_CON_ROTATE_ALL 0x0D +#define FB_EVENT_MODE_CHANGE_ALL 0x0B struct fb_event { struct fb_info *info; @@ -892,7 +888,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix); extern int fb_get_options(char *name, char **option); extern int fb_new_modelist(struct fb_info *info); -extern int fb_con_duit(struct fb_info *info, int event, void *data); extern struct fb_info *registered_fb[FB_MAX]; extern int num_registered_fb; diff --git a/include/linux/futex.h b/include/linux/futex.h index 966a5b3da43..34c3a215f2c 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -12,6 +12,9 @@ #define FUTEX_REQUEUE 3 #define FUTEX_CMP_REQUEUE 4 #define FUTEX_WAKE_OP 5 +#define 
FUTEX_LOCK_PI 6 +#define FUTEX_UNLOCK_PI 7 +#define FUTEX_TRYLOCK_PI 8 /* * Support for robust futexes: the kernel cleans up held futexes at @@ -90,18 +93,21 @@ struct robust_list_head { */ #define ROBUST_LIST_LIMIT 2048 -long do_futex(unsigned long uaddr, int op, int val, - unsigned long timeout, unsigned long uaddr2, int val2, - int val3); +long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, + u32 __user *uaddr2, u32 val2, u32 val3); extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); #ifdef CONFIG_FUTEX extern void exit_robust_list(struct task_struct *curr); +extern void exit_pi_state_list(struct task_struct *curr); #else static inline void exit_robust_list(struct task_struct *curr) { } +static inline void exit_pi_state_list(struct task_struct *curr) +{ +} #endif #define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h new file mode 100644 index 00000000000..21ea7610e17 --- /dev/null +++ b/include/linux/hw_random.h @@ -0,0 +1,50 @@ +/* + Hardware Random Number Generator + + Please read Documentation/hw_random.txt for details on use. + + ---------------------------------------------------------- + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + */ + +#ifndef LINUX_HWRANDOM_H_ +#define LINUX_HWRANDOM_H_ +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/list.h> + +/** + * struct hwrng - Hardware Random Number Generator driver + * @name: Unique RNG name. + * @init: Initialization callback (can be NULL). + * @cleanup: Cleanup callback (can be NULL). + * @data_present: Callback to determine if data is available + * on the RNG. If NULL, it is assumed that + * there is always data available. + * @data_read: Read data from the RNG device. + * Returns the number of lower random bytes in "data". + * Must not be NULL. + * @priv: Private data, for use by the RNG driver. + */ +struct hwrng { + const char *name; + int (*init)(struct hwrng *rng); + void (*cleanup)(struct hwrng *rng); + int (*data_present)(struct hwrng *rng); + int (*data_read)(struct hwrng *rng, u32 *data); + unsigned long priv; + + /* internal. */ + struct list_head list; +}; + +/** Register a new Hardware Random Number Generator driver. */ +extern int hwrng_register(struct hwrng *rng); +/** Unregister a Hardware Random Number Generator driver. 
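A minimal sketch of a driver for this interface; the MMIO register layout, base pointer and offsets are assumptions, and the include/probe code is omitted:

static void __iomem *example_rng_base;	/* assumed mapped in the probe path */

static int example_rng_data_present(struct hwrng *rng)
{
	return readl(example_rng_base) & 1;		/* "data ready" bit */
}

static int example_rng_data_read(struct hwrng *rng, u32 *data)
{
	*data = readl(example_rng_base + 4);		/* 32-bit random word */
	return 4;					/* four valid bytes in *data */
}

static struct hwrng example_rng = {
	.name		= "example",
	.data_present	= example_rng_data_present,
	.data_read	= example_rng_data_read,
};

/* probe:   hwrng_register(&example_rng);
 * remove:  hwrng_unregister(&example_rng); */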
*/ +extern void hwrng_unregister(struct hwrng *rng); + +#endif /* __KERNEL__ */ +#endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/idr.h b/include/linux/idr.h index d37c8d808b0..f559a719dbe 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -78,6 +78,7 @@ void *idr_find(struct idr *idp, int id); int idr_pre_get(struct idr *idp, gfp_t gfp_mask); int idr_get_new(struct idr *idp, void *ptr, int *id); int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); +void *idr_replace(struct idr *idp, void *ptr, int id); void idr_remove(struct idr *idp, int id); void idr_destroy(struct idr *idp); void idr_init(struct idr *idp); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 41ecbb847f3..3a256957fb5 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -87,6 +87,7 @@ extern struct group_info init_groups; .lock_depth = -1, \ .prio = MAX_PRIO-20, \ .static_prio = MAX_PRIO-20, \ + .normal_prio = MAX_PRIO-20, \ .policy = SCHED_NORMAL, \ .cpus_allowed = CPU_MASK_ALL, \ .mm = NULL, \ @@ -119,10 +120,11 @@ extern struct group_info init_groups; .signal = {{0}}}, \ .blocked = {{0}}, \ .alloc_lock = SPIN_LOCK_UNLOCKED, \ - .proc_lock = SPIN_LOCK_UNLOCKED, \ .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ + .pi_lock = SPIN_LOCK_UNLOCKED, \ + INIT_RT_MUTEXES(tsk) \ } diff --git a/include/linux/input.h b/include/linux/input.h index b32c2b6e53f..56f1e0e1e59 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -232,7 +232,8 @@ struct input_absinfo { #define KEY_PAUSE 119 #define KEY_KPCOMMA 121 -#define KEY_HANGUEL 122 +#define KEY_HANGEUL 122 +#define KEY_HANGUEL KEY_HANGEUL #define KEY_HANJA 123 #define KEY_YEN 124 #define KEY_LEFTMETA 125 @@ -1005,6 +1006,7 @@ static inline void init_input_dev(struct input_dev *dev) } struct input_dev *input_allocate_device(void); +void input_free_device(struct input_dev *dev); static inline struct input_dev *input_get_device(struct input_dev *dev) { @@ -1016,12 +1018,6 @@ static inline void input_put_device(struct input_dev *dev) class_device_put(&dev->cdev); } -static inline void input_free_device(struct input_dev *dev) -{ - if (dev) - input_put_device(dev); -} - int input_register_device(struct input_dev *); void input_unregister_device(struct input_dev *); diff --git a/include/linux/ioport.h b/include/linux/ioport.h index cd6bd001ba4..edfc733b157 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -105,6 +105,9 @@ extern int allocate_resource(struct resource *root, struct resource *new, int adjust_resource(struct resource *res, unsigned long start, unsigned long size); +/* get registered SYSTEM_RAM resources in specified area */ +extern int find_next_system_ram(struct resource *res); + /* Convenience shorthand with allocation */ #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 5653b2f23b6..d09fbeabf1d 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -210,11 +210,7 @@ struct kernel_ipmi_msg #include <linux/list.h> #include <linux/module.h> #include <linux/device.h> - -#ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> -extern struct proc_dir_entry *proc_ipmi_root; -#endif /* CONFIG_PROC_FS */ /* Opaque type for a IPMI message user. One of these is needed to send and receive messages. 
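Referring back to the input.h hunk above: input_free_device() is now a real function, and the usual pattern is to free an input_dev only if registration never succeeded, while registered devices are torn down with input_unregister_device(). A sketch, with the device name and key choice as illustrative assumptions:

static struct input_dev *example_dev;

static int example_input_setup(void)
{
	int err;

	example_dev = input_allocate_device();
	if (!example_dev)
		return -ENOMEM;

	example_dev->name = "Example button";
	set_bit(EV_KEY, example_dev->evbit);
	set_bit(KEY_HANGEUL, example_dev->keybit);	/* new spelling; KEY_HANGUEL stays as an alias */

	err = input_register_device(example_dev);
	if (err) {
		input_free_device(example_dev);		/* never registered, free directly */
		example_dev = NULL;
		return err;
	}
	return 0;
}

static void example_input_teardown(void)
{
	input_unregister_device(example_dev);		/* not input_free_device() once registered */
}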
*/ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3c5e4c2e517..5c1ec1f84ea 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -32,6 +32,7 @@ extern const char linux_banner[]; #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #define KERN_EMERG "<0>" /* system is unusable */ #define KERN_ALERT "<1>" /* action must be taken immediately */ @@ -336,6 +337,12 @@ struct sysinfo { /* Force a compilation error if condition is true */ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +/* Force a compilation error if condition is true, but also produce a + result (of value 0 and type size_t), so the expression can be used + e.g. in a structure initializer (or where-ever else comma expressions + aren't permitted). */ +#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) + /* Trap pasters of __FUNCTION__ at compile-time */ #define __FUNCTION__ (__func__) diff --git a/include/linux/key.h b/include/linux/key.h index e81ebf910d0..e693e729bc9 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -248,7 +248,14 @@ extern struct key *key_alloc(struct key_type *type, const char *desc, uid_t uid, gid_t gid, struct task_struct *ctx, - key_perm_t perm, int not_in_quota); + key_perm_t perm, + unsigned long flags); + + +#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ +#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ +#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ + extern int key_payload_reserve(struct key *key, size_t datalen); extern int key_instantiate_and_link(struct key *key, const void *data, @@ -285,7 +292,7 @@ extern key_ref_t key_create_or_update(key_ref_t keyring, const char *description, const void *payload, size_t plen, - int not_in_quota); + unsigned long flags); extern int key_update(key_ref_t key, const void *payload, @@ -299,7 +306,7 @@ extern int key_unlink(struct key *keyring, extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, struct task_struct *ctx, - int not_in_quota, + unsigned long flags, struct key *dest); extern int keyring_clear(struct key *keyring); diff --git a/include/linux/libata.h b/include/linux/libata.h index 20b1cf527c6..f4284bf8975 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -30,6 +30,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> +#include <asm/scatterlist.h> #include <asm/io.h> #include <linux/ata.h> #include <linux/workqueue.h> @@ -887,6 +888,9 @@ static inline unsigned int ata_tag_internal(unsigned int tag) return tag == ATA_MAX_QUEUE - 1; } +/* + * device helpers + */ static inline unsigned int ata_class_enabled(unsigned int class) { return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI; @@ -917,6 +921,17 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev) return ata_class_absent(dev->class); } +/* + * port helpers + */ +static inline int ata_port_max_devices(const struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_SLAVE_POSS) + return 2; + return 1; +} + + static inline u8 ata_chk_status(struct ata_port *ap) { return ap->ops->check_status(ap); diff --git a/include/linux/license.h b/include/linux/license.h new file mode 100644 index 00000000000..decdbf43cb5 --- /dev/null +++ b/include/linux/license.h @@ -0,0 +1,14 @@ +#ifndef __LICENSE_H +#define __LICENSE_H + +static inline int license_is_gpl_compatible(const char 
*license) +{ + return (strcmp(license, "GPL") == 0 + || strcmp(license, "GPL v2") == 0 + || strcmp(license, "GPL and additional rights") == 0 + || strcmp(license, "Dual BSD/GPL") == 0 + || strcmp(license, "Dual MIT/GPL") == 0 + || strcmp(license, "Dual MPL/GPL") == 0); +} + +#endif diff --git a/include/linux/list.h b/include/linux/list.h index 37ca31b21bb..6b74adf5297 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -4,18 +4,11 @@ #ifdef __KERNEL__ #include <linux/stddef.h> +#include <linux/poison.h> #include <linux/prefetch.h> #include <asm/system.h> /* - * These are non-NULL pointers that will result in page faults - * under normal circumstances, used to verify that nobody uses - * non-initialized list entries. - */ -#define LIST_POISON1 ((void *) 0x00100100) -#define LIST_POISON2 ((void *) 0x00200200) - -/* * Simple doubly linked list implementation. * * Some of the internal functions ("__xxx") are useful when diff --git a/include/linux/loop.h b/include/linux/loop.h index bf3d2345ce9..e76c7611d6c 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h @@ -59,7 +59,7 @@ struct loop_device { struct bio *lo_bio; struct bio *lo_biotail; int lo_state; - struct task_struct *lo_thread; + struct completion lo_done; struct completion lo_bh_done; struct mutex lo_ctl_mutex; int lo_pending; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 91120638617..218501cfaeb 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -63,6 +63,76 @@ extern int online_pages(unsigned long, unsigned long); /* reasonably generic interface to expand the physical pages in a zone */ extern int __add_pages(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); + +#ifdef CONFIG_NUMA +extern int memory_add_physaddr_to_nid(u64 start); +#else +static inline int memory_add_physaddr_to_nid(u64 start) +{ + return 0; +} +#endif + +#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION +/* + * For supporting node-hotadd, we have to allocate a new pgdat. + * + * If an arch has generic style NODE_DATA(), + * node_data[nid] = kzalloc() works well. But it depends on the architecture. + * + * In general, generic_alloc_nodedata() is used. + * Now, arch_free_nodedata() is just defined for error path of node_hot_add. + * + */ +extern pg_data_t *arch_alloc_nodedata(int nid); +extern void arch_free_nodedata(pg_data_t *pgdat); +extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); + +#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + +#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) +#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) + +#ifdef CONFIG_NUMA +/* + * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat. + * XXX: kmalloc_node() can't work well to get new node's memory at this time. + * Because, pgdat for the new node is not allocated/initialized yet itself. + * To use new node's memory, more consideration will be necessary. + */ +#define generic_alloc_nodedata(nid) \ +({ \ + kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ +}) +/* + * This definition is just for error path in node hotadd. + * For node hotremove, we have to replace this. 
+ */ +#define generic_free_nodedata(pgdat) kfree(pgdat) + +extern pg_data_t *node_data[]; +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ + node_data[nid] = pgdat; +} + +#else /* !CONFIG_NUMA */ + +/* never called */ +static inline pg_data_t *generic_alloc_nodedata(int nid) +{ + BUG(); + return NULL; +} +static inline void generic_free_nodedata(pg_data_t *pgdat) +{ +} +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ +} +#endif /* CONFIG_NUMA */ +#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + #else /* ! CONFIG_MEMORY_HOTPLUG */ /* * Stub functions for when hotplug is off @@ -99,7 +169,8 @@ static inline int __remove_pages(struct zone *zone, unsigned long start_pfn, return -ENOSYS; } -extern int add_memory(u64 start, u64 size); +extern int add_memory(int nid, u64 start, u64 size); +extern int arch_add_memory(int nid, u64 start, u64 size); extern int remove_memory(u64 start, u64 size); #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index a929ea197e4..c41a1299b8c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1030,13 +1030,20 @@ static inline void vm_stat_account(struct mm_struct *mm, } #endif /* CONFIG_PROC_FS */ +static inline void +debug_check_no_locks_freed(const void *from, unsigned long len) +{ + mutex_debug_check_no_locks_freed(from, len); + rt_mutex_debug_check_no_locks_freed(from, len); +} + #ifndef CONFIG_DEBUG_PAGEALLOC static inline void kernel_map_pages(struct page *page, int numpages, int enable) { if (!PageHighMem(page) && !enable) - mutex_debug_check_no_locks_freed(page_address(page), - numpages * PAGE_SIZE); + debug_check_no_locks_freed(page_address(page), + numpages * PAGE_SIZE); } #endif @@ -1065,5 +1072,7 @@ void drop_slab(void); extern int randomize_va_space; #endif +const char *arch_vma_name(struct vm_area_struct *vma); + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 2d366098eab..9ebbb74b7b7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -285,6 +285,9 @@ struct module /* The size of the executable code in each section. */ unsigned long init_text_size, core_text_size; + /* The handle returned from unwind_add_table. 
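A hedged sketch of how the memory_hotplug.h interface above is meant to be used now that add_memory() takes a node id; the caller (for example an ACPI memory-device handler) and the start/size values are outside this patch:

static int example_hot_add(u64 start, u64 size)
{
	int nid = memory_add_physaddr_to_nid(start);	/* stub returns node 0 on !NUMA builds */

	return add_memory(nid, start, size);
}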
*/ + void *unwind_info; + /* Arch-specific module values */ struct mod_arch_specific arch; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index bc747e5d713..03cd7551a7a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -699,7 +699,6 @@ extern int dev_hard_start_xmit(struct sk_buff *skb, extern void dev_init(void); -extern int netdev_nit; extern int netdev_budget; /* Called by rtnetlink.c:rtnl_unlock() */ diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index ca5a8733000..1efe60c5c00 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -31,6 +31,7 @@ struct netpoll_info { int rx_flags; spinlock_t rx_lock; struct netpoll *rx_np; /* netpoll that registered an rx_hook */ + struct sk_buff_head arp_tx; /* list of arp requests to reply to */ }; void netpoll_poll(struct netpoll *np); diff --git a/include/linux/node.h b/include/linux/node.h index 254dc3de650..81dcec84cd8 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -26,8 +26,25 @@ struct node { struct sys_device sysdev; }; +extern struct node node_devices[]; + extern int register_node(struct node *, int, struct node *); extern void unregister_node(struct node *node); +extern int register_one_node(int nid); +extern void unregister_one_node(int nid); +#ifdef CONFIG_NUMA +extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); +extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); +#else +static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + return 0; +} +static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + return 0; +} +#endif #define to_node(sys_device) container_of(sys_device, struct node, sysdev) diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h new file mode 100644 index 00000000000..135742cfada --- /dev/null +++ b/include/linux/nsc_gpio.h @@ -0,0 +1,42 @@ +/** + nsc_gpio.c + + National Semiconductor GPIO common access methods. + + struct nsc_gpio_ops abstracts the low-level access + operations for the GPIO units on 2 NSC chip families; the GEODE + integrated CPU, and the PC-8736[03456] integrated PC-peripheral + chips. + + The GPIO units on these chips have the same pin architecture, but + the access methods differ. Thus, scx200_gpio and pc8736x_gpio + implement their own versions of these routines; and use the common + file-operations routines implemented in nsc_gpio module. + + Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com> + + NB: this work was tested on the Geode SC-1100 and PC-87366 chips. + NSC sold the GEODE line to AMD, and the PC-8736x line to Winbond. 
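One way a chip driver might fill the ops table defined just below, here using the scx200_gpio_* inline helpers declared later in this series of headers; the mapping is illustrative (the in-tree drivers carry their own wrappers), gpio_config is left out, and .dev is expected to be set at init time:

static struct nsc_gpio_ops example_scx200_ops = {
	.owner		= THIS_MODULE,
	.gpio_dump	= nsc_gpio_dump,
	.gpio_get	= scx200_gpio_get,
	.gpio_set	= scx200_gpio_set,
	.gpio_set_high	= scx200_gpio_set_high,
	.gpio_set_low	= scx200_gpio_set_low,
	.gpio_change	= scx200_gpio_change,
	.gpio_current	= scx200_gpio_current,
};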
+*/ + +struct nsc_gpio_ops { + struct module* owner; + u32 (*gpio_config) (unsigned iminor, u32 mask, u32 bits); + void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor); + int (*gpio_get) (unsigned iminor); + void (*gpio_set) (unsigned iminor, int state); + void (*gpio_set_high)(unsigned iminor); + void (*gpio_set_low) (unsigned iminor); + void (*gpio_change) (unsigned iminor); + int (*gpio_current) (unsigned iminor); + struct device* dev; /* for dev_dbg() support, set in init */ +}; + +extern ssize_t nsc_gpio_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos); + +extern ssize_t nsc_gpio_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); + +extern void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index); + diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c2fd2d19938..9ae6b1a7536 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1202,6 +1202,7 @@ #define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448 #define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 #define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 #define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 @@ -2170,7 +2171,6 @@ #define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 #define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e #define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 -#define PCI_DEVICE_ID_INTEL_GD31244 0x3200 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 diff --git a/include/linux/plist.h b/include/linux/plist.h new file mode 100644 index 00000000000..3404faef542 --- /dev/null +++ b/include/linux/plist.h @@ -0,0 +1,247 @@ +/* + * Descending-priority-sorted double-linked list + * + * (C) 2002-2003 Intel Corp + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>. + * + * 2001-2005 (c) MontaVista Software, Inc. + * Daniel Walker <dwalker@mvista.com> + * + * (C) 2005 Thomas Gleixner <tglx@linutronix.de> + * + * Simplifications of the original code by + * Oleg Nesterov <oleg@tv-sign.ru> + * + * Licensed under the FSF's GNU Public License v2 or later. + * + * Based on simple lists (include/linux/list.h). + * + * This is a priority-sorted list of nodes; each node has a + * priority from INT_MIN (highest) to INT_MAX (lowest). + * + * Addition is O(K), removal is O(1), change of priority of a node is + * O(K) and K is the number of RT priority levels used in the system. + * (1 <= K <= 99) + * + * This list is really a list of lists: + * + * - The tier 1 list is the prio_list, different priority nodes. + * + * - The tier 2 list is the node_list, serialized nodes. + * + * Simple ASCII art explanation: + * + * |HEAD | + * | | + * |prio_list.prev|<------------------------------------| + * |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-| + * |10 | |10| |21| |21| |21| |40| (prio) + * | | | | | | | | | | | | + * | | | | | | | | | | | | + * |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-| + * |node_list.prev|<------------------------------------| + * + * The nodes on the prio_list list are sorted by priority to simplify + * the insertion of new nodes. There are no nodes with duplicate + * priorites on the list. + * + * The nodes on the node_list is ordered by priority and can contain + * entries which have the same priority. 
Those entries are ordered + * FIFO + * + * Addition means: look for the prio_list node in the prio_list + * for the priority of the node and insert it before the node_list + * entry of the next prio_list node. If it is the first node of + * that priority, add it to the prio_list in the right position and + * insert it into the serialized node_list list + * + * Removal means remove it from the node_list and remove it from + * the prio_list if the node_list list_head is non empty. In case + * of removal from the prio_list it must be checked whether other + * entries of the same priority are on the list or not. If there + * is another entry of the same priority then this entry has to + * replace the removed entry on the prio_list. If the entry which + * is removed is the only entry of this priority then a simple + * remove from both list is sufficient. + * + * INT_MIN is the highest priority, 0 is the medium highest, INT_MAX + * is lowest priority. + * + * No locking is done, up to the caller. + * + */ +#ifndef _LINUX_PLIST_H_ +#define _LINUX_PLIST_H_ + +#include <linux/list.h> +#include <linux/spinlock_types.h> + +struct plist_head { + struct list_head prio_list; + struct list_head node_list; +#ifdef CONFIG_DEBUG_PI_LIST + spinlock_t *lock; +#endif +}; + +struct plist_node { + int prio; + struct plist_head plist; +}; + +#ifdef CONFIG_DEBUG_PI_LIST +# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock +#else +# define PLIST_HEAD_LOCK_INIT(_lock) +#endif + +/** + * #PLIST_HEAD_INIT - static struct plist_head initializer + * + * @head: struct plist_head variable name + */ +#define PLIST_HEAD_INIT(head, _lock) \ +{ \ + .prio_list = LIST_HEAD_INIT((head).prio_list), \ + .node_list = LIST_HEAD_INIT((head).node_list), \ + PLIST_HEAD_LOCK_INIT(&(_lock)) \ +} + +/** + * #PLIST_NODE_INIT - static struct plist_node initializer + * + * @node: struct plist_node variable name + * @__prio: initial node priority + */ +#define PLIST_NODE_INIT(node, __prio) \ +{ \ + .prio = (__prio), \ + .plist = PLIST_HEAD_INIT((node).plist, NULL), \ +} + +/** + * plist_head_init - dynamic struct plist_head initializer + * + * @head: &struct plist_head pointer + */ +static inline void +plist_head_init(struct plist_head *head, spinlock_t *lock) +{ + INIT_LIST_HEAD(&head->prio_list); + INIT_LIST_HEAD(&head->node_list); +#ifdef CONFIG_DEBUG_PI_LIST + head->lock = lock; +#endif +} + +/** + * plist_node_init - Dynamic struct plist_node initializer + * + * @node: &struct plist_node pointer + * @prio: initial node priority + */ +static inline void plist_node_init(struct plist_node *node, int prio) +{ + node->prio = prio; + plist_head_init(&node->plist, NULL); +} + +extern void plist_add(struct plist_node *node, struct plist_head *head); +extern void plist_del(struct plist_node *node, struct plist_head *head); + +/** + * plist_for_each - iterate over the plist + * + * @pos1: the type * to use as a loop counter. + * @head: the head for your list. + */ +#define plist_for_each(pos, head) \ + list_for_each_entry(pos, &(head)->node_list, plist.node_list) + +/** + * plist_for_each_entry_safe - iterate over a plist of given type safe + * against removal of list entry + * + * @pos1: the type * to use as a loop counter. + * @n1: another type * to use as temporary storage + * @head: the head for your list. + */ +#define plist_for_each_safe(pos, n, head) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list) + +/** + * plist_for_each_entry - iterate over list of given type + * + * @pos: the type * to use as a loop counter. 
+ * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define plist_for_each_entry(pos, head, mem) \ + list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list) + +/** + * plist_for_each_entry_safe - iterate over list of given type safe against + * removal of list entry + * + * @pos: the type * to use as a loop counter. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @m: the name of the list_struct within the struct. + */ +#define plist_for_each_entry_safe(pos, n, head, m) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list) + +/** + * plist_head_empty - return !0 if a plist_head is empty + * + * @head: &struct plist_head pointer + */ +static inline int plist_head_empty(const struct plist_head *head) +{ + return list_empty(&head->node_list); +} + +/** + * plist_node_empty - return !0 if plist_node is not on a list + * + * @node: &struct plist_node pointer + */ +static inline int plist_node_empty(const struct plist_node *node) +{ + return plist_head_empty(&node->plist); +} + +/* All functions below assume the plist_head is not empty. */ + +/** + * plist_first_entry - get the struct for the first entry + * + * @ptr: the &struct plist_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#ifdef CONFIG_DEBUG_PI_LIST +# define plist_first_entry(head, type, member) \ +({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_first(head), type, member); \ +}) +#else +# define plist_first_entry(head, type, member) \ + container_of(plist_first(head), type, member) +#endif + +/** + * plist_first - return the first node (and thus, highest priority) + * + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. + */ +static inline struct plist_node* plist_first(const struct plist_head *head) +{ + return list_entry(head->node_list.next, + struct plist_node, plist.node_list); +} + +#endif diff --git a/include/linux/poison.h b/include/linux/poison.h new file mode 100644 index 00000000000..a5347c02432 --- /dev/null +++ b/include/linux/poison.h @@ -0,0 +1,58 @@ +#ifndef _LINUX_POISON_H +#define _LINUX_POISON_H + +/********** include/linux/list.h **********/ +/* + * These are non-NULL pointers that will result in page faults + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +#define LIST_POISON1 ((void *) 0x00100100) +#define LIST_POISON2 ((void *) 0x00200200) + +/********** mm/slab.c **********/ +/* + * Magic nums for obj red zoning. + * Placed in the first word before and the first word after an obj. + */ +#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ +#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ + +/* ...and for poisoning */ +#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ +#define POISON_FREE 0x6b /* for use-after-free poisoning */ +#define POISON_END 0xa5 /* end-byte of poisoning */ + +/********** arch/$ARCH/mm/init.c **********/ +#define POISON_FREE_INITMEM 0xcc + +/********** arch/x86_64/mm/init.c **********/ +#define POISON_FREE_INITDATA 0xba + +/********** arch/ia64/hp/common/sba_iommu.c **********/ +/* + * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a + * value of "SBAIOMMU POISON\0" for spill-over poisoning. 
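Putting the plist API above together, a minimal sketch of a priority-ordered waiter queue; the example_waiter structure and its lock are assumptions, while the plist calls are the interface defined in the new header. A lower prio value means higher priority:

struct example_waiter {
	struct plist_node	node;
	struct task_struct	*task;
};

static DEFINE_SPINLOCK(example_lock);
static struct plist_head example_waiters =
	PLIST_HEAD_INIT(example_waiters, example_lock);

static void example_enqueue(struct example_waiter *w, int prio)
{
	plist_node_init(&w->node, prio);
	plist_add(&w->node, &example_waiters);	/* O(K) insertion, K = prio levels in use */
}

static struct example_waiter *example_top(void)
{
	if (plist_head_empty(&example_waiters))
		return NULL;
	/* first node == lowest prio value == highest priority */
	return plist_first_entry(&example_waiters, struct example_waiter, node);
}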
+ */ + +/********** fs/jbd/journal.c **********/ +#define JBD_POISON_FREE 0x5b + +/********** drivers/base/dmapool.c **********/ +#define POOL_POISON_FREED 0xa7 /* !inuse */ +#define POOL_POISON_ALLOCATED 0xa9 /* !initted */ + +/********** drivers/atm/ **********/ +#define ATM_POISON_FREE 0x12 + +/********** kernel/mutexes **********/ +#define MUTEX_DEBUG_INIT 0x11 +#define MUTEX_DEBUG_FREE 0x22 + +/********** security/ **********/ +#define KEY_DESTROY 0xbd + +/********** sound/oss/ **********/ +#define OSS_POISON_FREE 0xAB + +#endif diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 5810d28fbed..17e75783e3a 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -99,9 +99,8 @@ extern void proc_misc_init(void); struct mm_struct; +void proc_flush_task(struct task_struct *task); struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *); -struct dentry *proc_pid_unhash(struct task_struct *p); -void proc_pid_flush(struct dentry *proc_dentry); int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir); unsigned long task_vsize(struct mm_struct *); int task_statm(struct mm_struct *, int *, int *, int *, int *); @@ -211,8 +210,7 @@ static inline void proc_net_remove(const char *name) #define proc_net_create(name, mode, info) ({ (void)(mode), NULL; }) static inline void proc_net_remove(const char *name) {} -static inline struct dentry *proc_pid_unhash(struct task_struct *p) { return NULL; } -static inline void proc_pid_flush(struct dentry *proc_dentry) { } +static inline void proc_flush_task(struct task_struct *task) { } static inline struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent) { return NULL; } @@ -248,8 +246,8 @@ extern void kclist_add(struct kcore_list *, void *, size_t); #endif struct proc_inode { - struct task_struct *task; - int type; + struct pid *pid; + int fd; union { int (*proc_get_link)(struct inode *, struct dentry **, struct vfsmount **); int (*proc_read)(struct task_struct *task, char *page); @@ -268,4 +266,10 @@ static inline struct proc_dir_entry *PDE(const struct inode *inode) return PROC_I(inode)->pde; } +struct proc_maps_private { + struct pid *pid; + struct task_struct *task; + struct vm_area_struct *tail_vma; +}; + #endif /* _LINUX_PROC_FS_H */ diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index ee918bc6e18..8b2749a259d 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -88,7 +88,6 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern int ptrace_attach(struct task_struct *tsk); extern int ptrace_detach(struct task_struct *, unsigned int); -extern void __ptrace_detach(struct task_struct *, unsigned int); extern void ptrace_disable(struct task_struct *); extern int ptrace_check_attach(struct task_struct *task, int kill); extern int ptrace_request(struct task_struct *child, long request, long addr, long data); diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h index 899437802ae..63df898fe2e 100644 --- a/include/linux/raid/bitmap.h +++ b/include/linux/raid/bitmap.h @@ -140,6 +140,7 @@ typedef __u16 bitmap_counter_t; enum bitmap_state { BITMAP_ACTIVE = 0x001, /* the bitmap is in use */ BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */ + BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */ BITMAP_HOSTENDIAN = 0x8000, }; @@ 
-244,15 +245,9 @@ struct bitmap { unsigned long daemon_lastrun; /* jiffies of last run */ unsigned long daemon_sleep; /* how many seconds between updates? */ - /* - * bitmap_writeback_daemon waits for file-pages that have been written, - * as there is no way to get a call-back when a page write completes. - */ - mdk_thread_t *writeback_daemon; - spinlock_t write_lock; + atomic_t pending_writes; /* pending writes to the bitmap file */ wait_queue_head_t write_wait; - struct list_head complete_pages; - mempool_t *write_pool; + }; /* the bitmap API */ diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h index 7eaf290e10e..ba15469daf1 100644 --- a/include/linux/raid/linear.h +++ b/include/linux/raid/linear.h @@ -13,8 +13,10 @@ typedef struct dev_info dev_info_t; struct linear_private_data { + struct linear_private_data *prev; /* earlier version */ dev_info_t **hash_table; sector_t hash_spacing; + sector_t array_size; int preshift; /* shift before dividing by hash_spacing */ dev_info_t disks[0]; }; diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h index 66b44e5e0d6..eb3e547c8fe 100644 --- a/include/linux/raid/md.h +++ b/include/linux/raid/md.h @@ -85,8 +85,6 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok); extern void md_error (mddev_t *mddev, mdk_rdev_t *rdev); extern void md_unplug_mddev(mddev_t *mddev); -extern void md_print_devices (void); - extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page); extern void md_super_wait(mddev_t *mddev); @@ -97,7 +95,5 @@ extern void md_new_event(mddev_t *mddev); extern void md_update_sb(mddev_t * mddev); -#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } - #endif diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index e2df61f5b09..c1e0ac55bab 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -40,7 +40,8 @@ typedef struct mdk_rdev_s mdk_rdev_t; * options passed in raidrun: */ -#define MAX_CHUNK_SIZE (4096*1024) +/* Currently this must fix in an 'int' */ +#define MAX_CHUNK_SIZE (1<<30) /* * MD's 'extended' device @@ -57,6 +58,7 @@ struct mdk_rdev_s struct page *sb_page; int sb_loaded; + __u64 sb_events; sector_t data_offset; /* start of data in array */ sector_t sb_offset; int sb_size; /* bytes in the superblock */ @@ -87,6 +89,10 @@ struct mdk_rdev_s * array and could again if we did a partial * resync from the bitmap */ + sector_t recovery_offset;/* If this device has been partially + * recovered, this is where we were + * up to. + */ atomic_t nr_pending; /* number of pending requests. 
* only maintained for arrays that @@ -182,6 +188,8 @@ struct mddev_s #define MD_RECOVERY_REQUESTED 6 #define MD_RECOVERY_CHECK 7 #define MD_RECOVERY_RESHAPE 8 +#define MD_RECOVERY_FROZEN 9 + unsigned long recovery; int in_sync; /* know to not need resync */ diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index f1fbae7e390..b6ebc69bae5 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -265,9 +265,12 @@ struct mdp_superblock_1 { /* feature_map bits */ #define MD_FEATURE_BITMAP_OFFSET 1 +#define MD_FEATURE_RECOVERY_OFFSET 2 /* recovery_offset is present and + * must be honoured + */ #define MD_FEATURE_RESHAPE_ACTIVE 4 -#define MD_FEATURE_ALL 5 +#define MD_FEATURE_ALL (1|2|4) #endif diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h index b1103298a8c..c41e56a7c09 100644 --- a/include/linux/raid/raid10.h +++ b/include/linux/raid/raid10.h @@ -24,11 +24,16 @@ struct r10_private_data_s { int far_copies; /* number of copies layed out * at large strides across drives */ + int far_offset; /* far_copies are offset by 1 stripe + * instead of many + */ int copies; /* near_copies * far_copies. * must be <= raid_disks */ sector_t stride; /* distance between far copies. - * This is size / far_copies + * This is size / far_copies unless + * far_offset, in which case it is + * 1 stripe. */ int chunk_shift; /* shift from chunks to sectors */ diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index 914af667044..20ed4c99763 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -212,6 +212,7 @@ struct raid5_private_data { mddev_t *mddev; struct disk_info *spare; int chunk_size, level, algorithm; + int max_degraded; int raid_disks, working_disks, failed_disks; int max_nr_stripes; diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6312758393b..48dfe00070c 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -258,6 +258,7 @@ extern void rcu_init(void); extern void rcu_check_callbacks(int cpu, int user); extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); +extern long rcu_batches_completed_bh(void); /* Exported interfaces */ extern void FASTCALL(call_rcu(struct rcu_head *head, diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h new file mode 100644 index 00000000000..fa4a3b82ba7 --- /dev/null +++ b/include/linux/rtmutex.h @@ -0,0 +1,117 @@ +/* + * RT Mutexes: blocking mutual exclusion locks with PI support + * + * started by Ingo Molnar and Thomas Gleixner: + * + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> + * + * This file contains the public data structure and API definitions. 
+ */ + +#ifndef __LINUX_RT_MUTEX_H +#define __LINUX_RT_MUTEX_H + +#include <linux/linkage.h> +#include <linux/plist.h> +#include <linux/spinlock_types.h> + +/* + * The rt_mutex structure + * + * @wait_lock: spinlock to protect the structure + * @wait_list: pilist head to enqueue waiters in priority order + * @owner: the mutex owner + */ +struct rt_mutex { + spinlock_t wait_lock; + struct plist_head wait_list; + struct task_struct *owner; +#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; + struct list_head held_list_entry; + unsigned long acquire_ip; + const char *name, *file; + int line; + void *magic; +#endif +}; + +struct rt_mutex_waiter; +struct hrtimer_sleeper; + +#ifdef CONFIG_DEBUG_RT_MUTEXES + extern int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len); + extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); +#else + static inline int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len) + { + return 0; + } +# define rt_mutex_debug_check_no_locks_held(task) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__) + extern void rt_mutex_debug_task_free(struct task_struct *tsk); +#else +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) +# define rt_mutex_debug_task_free(t) do { } while (0) +#endif + +#define __RT_MUTEX_INITIALIZER(mutexname) \ + { .wait_lock = SPIN_LOCK_UNLOCKED \ + , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ + , .owner = NULL \ + __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} + +#define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) + +/*** + * rt_mutex_is_locked - is the mutex locked + * @lock: the mutex to be queried + * + * Returns 1 if the mutex is locked, 0 if unlocked. 
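A minimal usage sketch against the API declared just below; the critical sections are placeholders:

static DEFINE_RT_MUTEX(example_lock);

static void example_critical_section(void)
{
	rt_mutex_lock(&example_lock);
	/* a higher-priority waiter blocking here boosts us via PI */
	rt_mutex_unlock(&example_lock);
}

static int example_try(void)
{
	if (!rt_mutex_trylock(&example_lock))
		return -EBUSY;
	/* ... */
	rt_mutex_unlock(&example_lock);
	return 0;
}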
+ */
+static inline int rt_mutex_is_locked(struct rt_mutex *lock)
+{
+	return lock->owner != NULL;
+}
+
+extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void rt_mutex_destroy(struct rt_mutex *lock);
+
+extern void rt_mutex_lock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
+						int detect_deadlock);
+extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+					struct hrtimer_sleeper *timeout,
+					int detect_deadlock);
+
+extern int rt_mutex_trylock(struct rt_mutex *lock);
+
+extern void rt_mutex_unlock(struct rt_mutex *lock);
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define INIT_RT_MUTEX_DEBUG(tsk)					\
+	.held_list_head	= LIST_HEAD_INIT(tsk.held_list_head),		\
+	.held_list_lock	= SPIN_LOCK_UNLOCKED
+#else
+# define INIT_RT_MUTEX_DEBUG(tsk)
+#endif
+
+#ifdef CONFIG_RT_MUTEXES
+# define INIT_RT_MUTEXES(tsk)						\
+	.pi_waiters	= PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock),	\
+	INIT_RT_MUTEX_DEBUG(tsk)
+#else
+# define INIT_RT_MUTEXES(tsk)
+#endif
+
+#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8d11d9310db..821f0481ebe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -73,6 +73,7 @@ struct sched_param {
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
+#include <linux/rtmutex.h>
 
 #include <linux/time.h>
 #include <linux/param.h>
@@ -83,6 +84,7 @@ struct sched_param {
 #include <asm/processor.h>
 
 struct exec_domain;
+struct futex_pi_state;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -123,6 +125,7 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+extern unsigned long weighted_cpuload(const int cpu);
 
 
 /*
@@ -494,8 +497,11 @@ struct signal_struct {
 
 #define MAX_PRIO	(MAX_RT_PRIO + 40)
 
-#define rt_task(p)	(unlikely((p)->prio < MAX_RT_PRIO))
+#define rt_prio(prio)	unlikely((prio) < MAX_RT_PRIO)
+#define rt_task(p)	rt_prio((p)->prio)
 #define batch_task(p)	(unlikely((p)->policy == SCHED_BATCH))
+#define has_rt_policy(p) \
+	unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
 
 /*
  * Some day this will be a full-fledged user tracking system..
@@ -558,9 +564,9 @@ enum idle_type
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#ifdef CONFIG_SMP
 #define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
 
+#ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
@@ -569,6 +575,11 @@ enum idle_type
 #define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
 #define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
 #define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
+
+#define BALANCE_FOR_POWER	((sched_mc_power_savings || sched_smt_power_savings) \
+				 ? SD_POWERSAVINGS_BALANCE : 0)
+
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
@@ -638,7 +649,7 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(cpumask_t *partition1,
+extern int partition_sched_domains(cpumask_t *partition1,
 				    cpumask_t *partition2);
 
 /*
@@ -713,10 +724,13 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#ifdef CONFIG_SMP
+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-	int prio, static_prio;
+#endif
+	int load_weight;	/* for niceness load balancing purposes */
+	int prio, static_prio, normal_prio;
 	struct list_head run_list;
 	prio_array_t *array;
 
@@ -842,8 +856,20 @@ struct task_struct {
 	u32 self_exec_id;
 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
 	spinlock_t alloc_lock;
-/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
-	spinlock_t proc_lock;
+
+	/* Protection of the PI data structures: */
+	spinlock_t pi_lock;
+
+#ifdef CONFIG_RT_MUTEXES
+	/* PI waiters blocked on a rt_mutex held by this task */
+	struct plist_head pi_waiters;
+	/* Deadlock detection and priority inheritance handling */
+	struct rt_mutex_waiter *pi_blocked_on;
+# ifdef CONFIG_DEBUG_RT_MUTEXES
+	spinlock_t held_list_lock;
+	struct list_head held_list_head;
+# endif
+#endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
 	/* mutex deadlock detection */
@@ -856,7 +882,6 @@ struct task_struct {
 /* VM state */
 	struct reclaim_state *reclaim_state;
 
-	struct dentry *proc_dentry;
 	struct backing_dev_info *backing_dev_info;
 
 	struct io_context *io_context;
@@ -891,6 +916,8 @@ struct task_struct {
 #ifdef CONFIG_COMPAT
 	struct compat_robust_list_head __user *compat_robust_list;
 #endif
+	struct list_head pi_state_list;
+	struct futex_pi_state *pi_state_cache;
 
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
@@ -958,6 +985,7 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
+#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -1012,6 +1040,19 @@ static inline void idle_task_exit(void) {}
 #endif
 
 extern void sched_idle_next(void);
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(task_t *p);
+extern void rt_mutex_setprio(task_t *p, int prio);
+extern void rt_mutex_adjust_pi(task_t *p);
+#else
+static inline int rt_mutex_getprio(task_t *p)
+{
+	return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p)	do { } while (0)
+#endif
+
 extern void set_user_nice(task_t *p, long nice);
 extern int task_prio(const task_t *p);
 extern int task_nice(const task_t *p);
@@ -1411,6 +1452,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
+#include <linux/sysdev.h>
+extern int sched_mc_power_savings, sched_smt_power_savings;
+extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
+extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+
 extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_PM
diff --git a/include/linux/scx200.h b/include/linux/scx200.h
index a22f9e173ad..693c0557e70 100644
--- a/include/linux/scx200.h
+++ b/include/linux/scx200.h
@@ -49,10 +49,3 @@ extern unsigned scx200_cb_base;
 #define SCx200_REV 0x3d		/* Revision Register */
 #define SCx200_CBA 0x3e		/* Configuration Base Address Register */
 #define SCx200_CBA_SCRATCH 0x64	/* Configuration Base Address Scratchpad */
-
-/*
-    Local variables:
-        compile-command: "make -C ../.. bzImage modules"
-        c-basic-offset: 8
-    End:
-*/
diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h
index 30cdd648ba7..90dd069cc14 100644
--- a/include/linux/scx200_gpio.h
+++ b/include/linux/scx200_gpio.h
@@ -1,6 +1,6 @@
 #include <linux/spinlock.h>
 
-u32 scx200_gpio_configure(int index, u32 set, u32 clear);
+u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear);
 
 extern unsigned scx200_gpio_base;
 extern long scx200_gpio_shadow[2];
@@ -17,7 +17,7 @@ extern long scx200_gpio_shadow[2];
 
 /* returns the value of the GPIO pin */
 
-static inline int scx200_gpio_get(int index) {
+static inline int scx200_gpio_get(unsigned index) {
 	__SCx200_GPIO_BANK;
 	__SCx200_GPIO_IOADDR + 0x04;
 	__SCx200_GPIO_INDEX;
@@ -29,7 +29,7 @@ static inline int scx200_gpio_get(int index) {
    driven if the GPIO is configured as an output, it might not be the
    state of the GPIO right now if the GPIO is configured as an input) */
 
-static inline int scx200_gpio_current(int index) {
+static inline int scx200_gpio_current(unsigned index) {
 	__SCx200_GPIO_BANK;
 	__SCx200_GPIO_INDEX;
 
@@ -38,7 +38,7 @@ static inline int scx200_gpio_current(int index) {
 
 /* drive the GPIO signal high */
 
-static inline void scx200_gpio_set_high(int index) {
+static inline void scx200_gpio_set_high(unsigned index) {
 	__SCx200_GPIO_BANK;
 	__SCx200_GPIO_IOADDR;
 	__SCx200_GPIO_SHADOW;
@@ -49,7 +49,7 @@ static inline void scx200_gpio_set_high(int index) {
 
 /* drive the GPIO signal low */
 
-static inline void scx200_gpio_set_low(int index) {
+static inline void scx200_gpio_set_low(unsigned index) {
 	__SCx200_GPIO_BANK;
 	__SCx200_GPIO_IOADDR;
 	__SCx200_GPIO_SHADOW;
@@ -60,7 +60,7 @@ static inline void scx200_gpio_set_low(int index) {
 
 /* drive the GPIO signal to state */
 
-static inline void scx200_gpio_set(int index, int state) {
+static inline void scx200_gpio_set(unsigned index, int state) {
 	__SCx200_GPIO_BANK;
 	__SCx200_GPIO_IOADDR;
 	__SCx200_GPIO_SHADOW;
@@ -73,7 +73,7 @@ static inline void scx200_gpio_set(int index, int state) {
 }
 
 /* toggle the GPIO signal */
-static inline void scx200_gpio_change(int index) {
+static inline void scx200_gpio_change(unsigned index) {
 	__SCx200_GPIO_BANK;
 	__SCx200_GPIO_IOADDR;
 	__SCx200_GPIO_SHADOW;
@@ -87,10 +87,3 @@ static inline void scx200_gpio_change(int index) {
 #undef __SCx200_GPIO_SHADOW
 #undef __SCx200_GPIO_INDEX
 #undef __SCx200_GPIO_OUT
-
-/*
-    Local variables:
-        compile-command: "make -C ../.. bzImage modules"
-        c-basic-offset: 8
-    End:
-*/
diff --git a/include/linux/security.h b/include/linux/security.h
index d2c17bd91a2..51805806f97 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -862,6 +862,7 @@ struct swap_info_struct;
  *	Permit allocation of a key and assign security data. Note that key does
  *	not have a serial number assigned at this point.
  *	@key points to the key.
+ *	@flags is the allocation flags
  *	Return 0 if permission is granted, -ve error otherwise.
  * @key_free:
  *	Notification of destruction; free security data.
@@ -1324,7 +1325,7 @@ struct security_operations {
 
 	/* key management security hooks */
 #ifdef CONFIG_KEYS
-	int (*key_alloc)(struct key *key, struct task_struct *tsk);
+	int (*key_alloc)(struct key *key, struct task_struct *tsk, unsigned long flags);
 	void (*key_free)(struct key *key);
 	int (*key_permission)(key_ref_t key_ref,
 			      struct task_struct *context,
@@ -3040,9 +3041,10 @@ static inline int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 sk_sid
 #ifdef CONFIG_KEYS
 #ifdef CONFIG_SECURITY
 static inline int security_key_alloc(struct key *key,
-				     struct task_struct *tsk)
+				     struct task_struct *tsk,
+				     unsigned long flags)
 {
-	return security_ops->key_alloc(key, tsk);
+	return security_ops->key_alloc(key, tsk, flags);
 }
 
 static inline void security_key_free(struct key *key)
@@ -3060,7 +3062,8 @@ static inline int security_key_permission(key_ref_t key_ref,
 #else
 
 static inline int security_key_alloc(struct key *key,
-				     struct task_struct *tsk)
+				     struct task_struct *tsk,
+				     unsigned long flags)
 {
 	return 0;
 }
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 9b8bcf125c1..6e112cc5cdd 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -126,7 +126,7 @@ struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
 /* Just increments the mechanism's reference count and returns its input: */
 struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
 
-/* For every succesful gss_mech_get or gss_mech_get_by_* call there must be a
+/* For every successful gss_mech_get or gss_mech_get_by_* call there must be a
  * corresponding call to gss_mech_put. */
 void gss_mech_put(struct gss_api_mech *);
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index dc3f3aa0c83..c41e2d6d1ac 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -199,6 +199,8 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif
 
+extern int kswapd_run(int nid);
+
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 33785b79d54..008f04c5673 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -174,9 +174,9 @@ asmlinkage long sys_waitid(int which, pid_t pid, int options,
 				struct rusage __user *ru);
 asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
 asmlinkage long sys_set_tid_address(int __user *tidptr);
-asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
+asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
 			struct timespec __user *utime, u32 __user *uaddr2,
-			int val3);
+			u32 val3);
 
 asmlinkage long sys_init_module(void __user *umod, unsigned long len,
 				const char __user *uargs);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 6a60770984e..46e4d8f2771 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -148,9 +148,12 @@ enum
 	KERN_SPIN_RETRY=70,	/* int: number of spinlock retries */
 	KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */
 	KERN_IA64_UNALIGNED=72,	/* int: ia64 unaligned userland trap enable */
+	KERN_COMPAT_LOG=73,	/* int: print compat layer messages */
+	KERN_MAX_LOCK_DEPTH=74,
 };
 
 
+
 /* CTL_VM names: */
 enum
 {
@@ -187,6 +190,7 @@ enum
 	VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
 	VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
 	VM_PANIC_ON_OOM=33,	/* panic at out-of-memory */
+	VM_VDSO_ENABLED=34,	/* map VDSO into new processes? */
 };
 
 
diff --git a/include/linux/time.h b/include/linux/time.h
index 0cd696cee99..c05f8bb9a32 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -28,10 +28,13 @@ struct timezone {
 #ifdef __KERNEL__
 
 /* Parameters used to convert the timespec values: */
-#define MSEC_PER_SEC		1000L
-#define USEC_PER_SEC		1000000L
-#define NSEC_PER_SEC		1000000000L
-#define NSEC_PER_USEC		1000L
+#define MSEC_PER_SEC		1000L
+#define USEC_PER_MSEC		1000L
+#define NSEC_PER_USEC		1000L
+#define NSEC_PER_MSEC		1000000L
+#define USEC_PER_SEC		1000000L
+#define NSEC_PER_SEC		1000000000L
+#define FSEC_PER_SEC		1000000000000000L
 
 static inline int timespec_equal(struct timespec *a, struct timespec *b)
 {
@@ -77,6 +80,8 @@ extern struct timespec xtime;
 extern struct timespec wall_to_monotonic;
 extern seqlock_t xtime_lock;
 
+void timekeeping_init(void);
+
 static inline unsigned long get_seconds(void)
 {
 	return xtime.tv_sec;
@@ -100,6 +105,7 @@ extern int do_getitimer(int which, struct itimerval *value);
 extern void getnstimeofday(struct timespec *tv);
 
 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
+extern int timekeeping_is_continuous(void);
 
 /**
  * timespec_to_ns - Convert timespec to nanoseconds
@@ -142,6 +148,20 @@ extern struct timespec ns_to_timespec(const s64 nsec);
  */
 extern struct timeval ns_to_timeval(const s64 nsec);
 
+/**
+ * timespec_add_ns - Adds nanoseconds to a timespec
+ * @a:	pointer to timespec to be incremented
+ * @ns:	unsigned nanoseconds value to be added
+ */
+static inline void timespec_add_ns(struct timespec *a, u64 ns)
+{
+	ns += a->tv_nsec;
+	while(unlikely(ns >= NSEC_PER_SEC)) {
+		ns -= NSEC_PER_SEC;
+		a->tv_sec++;
+	}
+	a->tv_nsec = ns;
+}
 #endif /* __KERNEL__ */
 
 #define NFDBITS			__NFDBITS
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 34d3ccff7bb..19bb6538b49 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -303,6 +303,8 @@ time_interpolator_reset(void)
 
 #endif /* !CONFIG_TIME_INTERPOLATION */
 
+#define TICK_LENGTH_SHIFT	32
+
 /* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
 extern u64 current_tick_length(void);
 
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a305ae2e44b..ec1eca85290 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -134,7 +134,8 @@
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_EXEC	\
-				| SD_WAKE_AFFINE,	\
+				| SD_WAKE_AFFINE	\
+				| BALANCE_FOR_POWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
 	.nr_balance_failed	= 0,			\
diff --git a/include/linux/unwind.h b/include/linux/unwind.h
new file mode 100644
index 00000000000..ce48e2cd37a
--- /dev/null
+++ b/include/linux/unwind.h
@@ -0,0 +1,127 @@
+#ifndef _LINUX_UNWIND_H
+#define _LINUX_UNWIND_H
+
+/*
+ * Copyright (C) 2002-2006 Novell, Inc.
+ *	Jan Beulich <jbeulich@novell.com>
+ * This code is released under version 2 of the GNU GPL.
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/config.h>
+
+struct module;
+
+#ifdef CONFIG_STACK_UNWIND
+
+#include <asm/unwind.h>
+
+#ifndef ARCH_UNWIND_SECTION_NAME
+#define ARCH_UNWIND_SECTION_NAME ".eh_frame"
+#endif
+
+/*
+ * Initialize unwind support.
+ */
+extern void unwind_init(void);
+
+#ifdef CONFIG_MODULES
+
+extern void *unwind_add_table(struct module *,
+			      const void *table_start,
+			      unsigned long table_size);
+
+extern void unwind_remove_table(void *handle, int init_only);
+
+#endif
+
+extern int unwind_init_frame_info(struct unwind_frame_info *,
+				  struct task_struct *,
+				  /*const*/ struct pt_regs *);
+
+/*
+ * Prepare to unwind a blocked task.
+ */
+extern int unwind_init_blocked(struct unwind_frame_info *,
+			       struct task_struct *);
+
+/*
+ * Prepare to unwind the currently running thread.
+ */
+extern int unwind_init_running(struct unwind_frame_info *,
+			       asmlinkage int (*callback)(struct unwind_frame_info *,
+							  void *arg),
+			       void *arg);
+
+/*
+ * Unwind to previous to frame. Returns 0 if successful, negative
+ * number in case of an error.
+ */
+extern int unwind(struct unwind_frame_info *);
+
+/*
+ * Unwind until the return pointer is in user-land (or until an error
+ * occurs). Returns 0 if successful, negative number in case of
+ * error.
+ */
+extern int unwind_to_user(struct unwind_frame_info *);
+
+#else
+
+struct unwind_frame_info {};
+
+static inline void unwind_init(void) {}
+
+#ifdef CONFIG_MODULES
+
+static inline void *unwind_add_table(struct module *mod,
+				     const void *table_start,
+				     unsigned long table_size)
+{
+	return NULL;
+}
+
+#endif
+
+static inline void unwind_remove_table(void *handle, int init_only)
+{
+}
+
+static inline int unwind_init_frame_info(struct unwind_frame_info *info,
+					 struct task_struct *tsk,
+					 const struct pt_regs *regs)
+{
+	return -ENOSYS;
+}
+
+static inline int unwind_init_blocked(struct unwind_frame_info *info,
+				      struct task_struct *tsk)
+{
+	return -ENOSYS;
+}
+
+static inline int unwind_init_running(struct unwind_frame_info *info,
+				      asmlinkage int (*cb)(struct unwind_frame_info *,
+							   void *arg),
+				      void *arg)
+{
+	return -ENOSYS;
+}
+
+static inline int unwind(struct unwind_frame_info *info)
+{
+	return -ENOSYS;
+}
+
+static inline int unwind_to_user(struct unwind_frame_info *info)
+{
+	return -ENOSYS;
+}
+
+#endif
+
+#endif /* _LINUX_UNWIND_H */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 4f428547ec0..a62673dad76 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -245,6 +245,7 @@ struct v4l2_pix_format
 #define V4L2_PIX_FMT_YUV420  v4l2_fourcc('Y','U','1','2') /* 12 YUV 4:2:0 */
 #define V4L2_PIX_FMT_YYUV    v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */
 #define V4L2_PIX_FMT_HI240   v4l2_fourcc('H','I','2','4') /* 8 8-bit color */
+#define V4L2_PIX_FMT_HM12    v4l2_fourcc('H','M','1','2') /* 8 YUV 4:1:1 16x16 macroblocks */
 
 /* see http://www.siliconimaging.com/RGB%20Bayer.htm */
 #define V4L2_PIX_FMT_SBGGR8  v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */
@@ -821,6 +822,11 @@ enum v4l2_mpeg_stream_type {
 #define V4L2_CID_MPEG_STREAM_PID_PCR		(V4L2_CID_MPEG_BASE+4)
 #define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO	(V4L2_CID_MPEG_BASE+5)
 #define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO	(V4L2_CID_MPEG_BASE+6)
+#define V4L2_CID_MPEG_STREAM_VBI_FMT		(V4L2_CID_MPEG_BASE+7)
+enum v4l2_mpeg_stream_vbi_fmt {
+	V4L2_MPEG_STREAM_VBI_FMT_NONE = 0,  /* No VBI in the MPEG stream */
+	V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1,  /* VBI in private packets, IVTV format */
+};
 
 /* MPEG audio */
 #define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ	(V4L2_CID_MPEG_BASE+100)
diff --git a/include/media/cx2341x.h b/include/media/cx2341x.h
index 51fb06b4c39..d91d88f93c8 100644
--- a/include/media/cx2341x.h
+++ b/include/media/cx2341x.h
@@ -25,8 +25,13 @@ enum cx2341x_port {
 	CX2341X_PORT_SERIAL = 2
 };
 
+enum cx2341x_cap {
+	CX2341X_CAP_HAS_SLICED_VBI = 1 << 0,
+};
+
 struct cx2341x_mpeg_params {
 	/* misc */
+	u32 capabilities;
 	enum cx2341x_port port;
 	u16 width;
 	u16 height;
@@ -34,6 +39,7 @@ struct cx2341x_mpeg_params {
 
 	/* stream */
 	enum v4l2_mpeg_stream_type stream_type;
+	enum v4l2_mpeg_stream_vbi_fmt stream_vbi_fmt;
 
 	/* audio */
 	enum v4l2_mpeg_audio_sampling_freq audio_sampling_freq;
@@ -83,9 +89,9 @@ int cx2341x_ctrl_query(struct cx2341x_mpeg_params *params,
 		struct v4l2_queryctrl *qctrl);
 const char **cx2341x_ctrl_get_menu(u32 id);
 int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params,
-		struct v4l2_ext_controls *ctrls, int cmd);
+		struct v4l2_ext_controls *ctrls, unsigned int cmd);
 void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p);
-void cx2341x_log_status(struct cx2341x_mpeg_params *p, int cardid);
+void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix);
 
 /* Firmware names */
 #define CX2341X_FIRM_ENC_FILENAME "v4l-cx2341x-enc.fw"
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
index 098607cd4b7..e07136d74c2 100644
--- a/include/net/tipc/tipc_bearer.h
+++ b/include/net/tipc/tipc_bearer.h
@@ -49,10 +49,18 @@
 
 #define TIPC_MEDIA_TYPE_ETH	1
 
+/*
+ * Destination address structure used by TIPC bearers when sending messages
+ *
+ * IMPORTANT: The fields of this structure MUST be stored using the specified
+ * byte order indicated below, as the structure is exchanged between nodes
+ * as part of a link setup process.
+ */
+
 struct tipc_media_addr {
-	__u32 type;
+	__u32 type;			/* bearer type (network byte order) */
 	union {
-		__u8 eth_addr[6];	/* Ethernet bearer */
+		__u8 eth_addr[6];	/* 48 bit Ethernet addr (byte array) */
 #if 0
 	/* Prototypes for other possible bearer types */
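
For readers skimming the rt_mutex declarations at the top of this diff (include/linux/rtmutex.h), the following is a minimal, hypothetical usage sketch. It is not part of the patch: the lock and function names are invented, only the calls declared in the hunks above are used, and the surrounding kernel/module boilerplate is assumed.

#include <linux/rtmutex.h>

static struct rt_mutex demo_lock;	/* hypothetical lock */

static void demo_init(void)
{
	/* the second argument is a name used by the rt_mutex debug code */
	__rt_mutex_init(&demo_lock, "demo_lock");
}

static void demo_use(void)
{
	rt_mutex_lock(&demo_lock);	/* sleeping lock; waiters boost the owner (priority inheritance) */
	/* ... critical section ... */
	rt_mutex_unlock(&demo_lock);

	if (rt_mutex_trylock(&demo_lock)) {	/* non-blocking attempt; nonzero on success */
		/* ... */
		rt_mutex_unlock(&demo_lock);
	}
}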