Diffstat (limited to 'include/asm-generic')
 include/asm-generic/Kbuild                |  9
 include/asm-generic/Kbuild.asm            | 67
 include/asm-generic/atomic.h              | 47
 include/asm-generic/bitops/find.h         | 39
 include/asm-generic/cmpxchg-local.h       |  1
 include/asm-generic/dma-mapping-broken.h  |  3
 include/asm-generic/fcntl.h               | 14
 include/asm-generic/gpio.h                | 14
 include/asm-generic/hardirq.h             |  2
 include/asm-generic/io.h                  | 30
 include/asm-generic/ioctls.h              | 10
 include/asm-generic/irqflags.h            | 52
 include/asm-generic/kdebug.h              |  2
 include/asm-generic/local64.h             | 96
 include/asm-generic/percpu.h              | 29
 include/asm-generic/pgtable.h             |  4
 include/asm-generic/statfs.h              |  9
 include/asm-generic/syscalls.h            |  6
 include/asm-generic/system.h              | 20
 include/asm-generic/termbits.h            |  1
 include/asm-generic/topology.h            | 20
 include/asm-generic/unistd.h              | 39
 include/asm-generic/vmlinux.lds.h         | 18
 23 files changed, 382 insertions(+), 150 deletions(-)
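A note on one of the larger additions before the per-file diffs: the new <asm-generic/local64.h> provides a local64_t type for counters that only need to be atomic with respect to interrupts on the local CPU, falling back to local_t on 64-bit and atomic64_t on 32-bit. As a minimal usage sketch (the counter and function names are hypothetical, and it assumes the architecture's <asm/local64.h> simply pulls in this generic header):

	#include <linux/percpu.h>
	#include <asm/local64.h>

	/* Hypothetical per-CPU event counter, for illustration only. */
	static DEFINE_PER_CPU(local64_t, demo_events);

	static void demo_count_event(void)
	{
		/* Atomic against interrupts on this CPU; no cross-CPU locking.
		 * A stats counter tolerates the rare migration race here. */
		local64_inc(this_cpu_ptr(&demo_events));
	}

	static u64 demo_read_events(int cpu)
	{
		/* Readers on other CPUs may observe a slightly stale value. */
		return (u64)local64_read(&per_cpu(demo_events, cpu));
	}

The per-file diffs follow.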
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index eb62334cda2..53f91b1ae53 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -3,6 +3,8 @@ header-y += bitsperlong.h header-y += errno-base.h header-y += errno.h header-y += fcntl.h +header-y += int-l64.h +header-y += int-ll64.h header-y += ioctl.h header-y += ioctls.h header-y += ipcbuf.h @@ -12,10 +14,12 @@ header-y += msgbuf.h header-y += param.h header-y += poll.h header-y += posix_types.h +header-y += resource.h header-y += sembuf.h header-y += setup.h header-y += shmbuf.h header-y += shmparam.h +header-y += siginfo.h header-y += signal-defs.h header-y += signal.h header-y += socket.h @@ -28,8 +32,3 @@ header-y += termios.h header-y += types.h header-y += ucontext.h header-y += unistd.h - -unifdef-y += int-l64.h -unifdef-y += int-ll64.h -unifdef-y += resource.h -unifdef-y += siginfo.h diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm index 96d7c9804dc..c5d2e5dd871 100644 --- a/include/asm-generic/Kbuild.asm +++ b/include/asm-generic/Kbuild.asm @@ -1,5 +1,5 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \ - $(srctree)/include/asm-$(SRCARCH)/kvm.h),) + $(srctree)/include/asm-$(SRCARCH)/kvm.h),) header-y += kvm.h endif @@ -9,36 +9,37 @@ header-y += kvm_para.h endif ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \ - $(srctree)/include/asm-$(SRCARCH)/a.out.h),) -unifdef-y += a.out.h + $(srctree)/include/asm-$(SRCARCH)/a.out.h),) +header-y += a.out.h endif -unifdef-y += auxvec.h -unifdef-y += byteorder.h -unifdef-y += bitsperlong.h -unifdef-y += errno.h -unifdef-y += fcntl.h -unifdef-y += ioctl.h -unifdef-y += ioctls.h -unifdef-y += ipcbuf.h -unifdef-y += mman.h -unifdef-y += msgbuf.h -unifdef-y += param.h -unifdef-y += poll.h -unifdef-y += posix_types.h -unifdef-y += ptrace.h -unifdef-y += resource.h -unifdef-y += sembuf.h -unifdef-y += setup.h -unifdef-y += shmbuf.h -unifdef-y += sigcontext.h -unifdef-y += siginfo.h -unifdef-y += signal.h -unifdef-y += socket.h -unifdef-y += sockios.h -unifdef-y += stat.h -unifdef-y += statfs.h -unifdef-y += swab.h -unifdef-y += termbits.h -unifdef-y += termios.h -unifdef-y += types.h -unifdef-y += unistd.h + +header-y += auxvec.h +header-y += bitsperlong.h +header-y += byteorder.h +header-y += errno.h +header-y += fcntl.h +header-y += ioctl.h +header-y += ioctls.h +header-y += ipcbuf.h +header-y += mman.h +header-y += msgbuf.h +header-y += param.h +header-y += poll.h +header-y += posix_types.h +header-y += ptrace.h +header-y += resource.h +header-y += sembuf.h +header-y += setup.h +header-y += shmbuf.h +header-y += sigcontext.h +header-y += siginfo.h +header-y += signal.h +header-y += socket.h +header-y += sockios.h +header-y += stat.h +header-y += statfs.h +header-y += swab.h +header-y += termbits.h +header-y += termios.h +header-y += types.h +header-y += unistd.h diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 058129e9b04..e994197f84b 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -43,6 +43,7 @@ */ #define atomic_set(v, i) (((v)->counter) = (i)) +#include <linux/irqflags.h> #include <asm/system.h> /** @@ -57,11 +58,11 @@ static inline int atomic_add_return(int i, atomic_t *v) unsigned long flags; int temp; - local_irq_save(flags); + raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ temp = v->counter; temp += i; v->counter = temp; - local_irq_restore(flags); + raw_local_irq_restore(flags); return temp; } @@ 
-78,11 +79,11 @@ static inline int atomic_sub_return(int i, atomic_t *v) unsigned long flags; int temp; - local_irq_save(flags); + raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ temp = v->counter; temp -= i; v->counter = temp; - local_irq_restore(flags); + raw_local_irq_restore(flags); return temp; } @@ -119,14 +120,23 @@ static inline void atomic_dec(atomic_t *v) #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) +#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) + +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) + +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) + c = old; + return c != u; +} #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) @@ -135,20 +145,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) unsigned long flags; mask = ~mask; - local_irq_save(flags); + raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ *addr &= mask; - local_irq_restore(flags); + raw_local_irq_restore(flags); } -#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) -#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) - -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) - -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - /* Assume that atomic operations are already serializing */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 1914e974251..110fa700f85 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h @@ -1,15 +1,50 @@ #ifndef _ASM_GENERIC_BITOPS_FIND_H_ #define _ASM_GENERIC_BITOPS_FIND_H_ -#ifndef CONFIG_GENERIC_FIND_NEXT_BIT +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); -#endif + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit. 
+ */ +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first cleared bit. + */ +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h index b2ba2fc8829..2533fddd34a 100644 --- a/include/asm-generic/cmpxchg-local.h +++ b/include/asm-generic/cmpxchg-local.h @@ -2,6 +2,7 @@ #define __ASM_GENERIC_CMPXCHG_LOCAL_H #include <linux/types.h> +#include <linux/irqflags.h> extern unsigned long wrong_size_cmpxchg(volatile void *ptr); diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h index 82cd0cb1c3f..ccf7b4f34a3 100644 --- a/include/asm-generic/dma-mapping-broken.h +++ b/include/asm-generic/dma-mapping-broken.h @@ -72,9 +72,6 @@ dma_set_mask(struct device *dev, u64 mask); extern int dma_get_cache_alignment(void); -extern int -dma_is_consistent(struct device *dev, dma_addr_t dma_handle); - extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h index fcd268ce067..0fc16e3f0bf 100644 --- a/include/asm-generic/fcntl.h +++ b/include/asm-generic/fcntl.h @@ -3,6 +3,18 @@ #include <linux/types.h> +/* + * FMODE_EXEC is 0x20 + * FMODE_NONOTIFY is 0x1000000 + * These cannot be used by userspace O_* until internal and external open + * flags are split. + * -Eric Paris + */ + +/* + * When introducing new O_* bits, please check its uniqueness in fcntl_init(). + */ + #define O_ACCMODE 00000003 #define O_RDONLY 00000000 #define O_WRONLY 00000001 @@ -110,7 +122,7 @@ struct f_owner_ex { int type; - pid_t pid; + __kernel_pid_t pid; }; /* for F_[GET|SET]FL */ diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index c7376bf80b0..8ca18e26d7e 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -16,15 +16,27 @@ * While the GPIO programming interface defines valid GPIO numbers * to be in the range 0..MAX_INT, this library restricts them to the * smaller range 0..ARCH_NR_GPIOS-1. + * + * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of + * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is + * actually an estimate of a board-specific value. */ #ifndef ARCH_NR_GPIOS #define ARCH_NR_GPIOS 256 #endif +/* + * "valid" GPIO numbers are nonnegative and may be passed to + * setup routines like gpio_request(). only some valid numbers + * can successfully be requested and used. + * + * Invalid GPIO numbers are useful for indicating no-such-GPIO in + * platform data and other tables. 
+ */ + static inline int gpio_is_valid(int number) { - /* only some non-negative numbers are valid */ return ((unsigned)number) < ARCH_NR_GPIOS; } diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h index 62f59080e5c..04d0a977cd4 100644 --- a/include/asm-generic/hardirq.h +++ b/include/asm-generic/hardirq.h @@ -3,13 +3,13 @@ #include <linux/cache.h> #include <linux/threads.h> -#include <linux/irq.h> typedef struct { unsigned int __softirq_pending; } ____cacheline_aligned irq_cpustat_t; #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ +#include <linux/irq.h> #ifndef ack_bad_irq static inline void ack_bad_irq(unsigned int irq) diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index bcee6365dca..3577ca11a0b 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -19,7 +19,9 @@ #include <asm-generic/iomap.h> #endif +#ifndef mmiowb #define mmiowb() do {} while (0) +#endif /*****************************************************************************/ /* @@ -28,39 +30,51 @@ * differently. On the simple architectures, we just read/write the * memory location directly. */ +#ifndef __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { return *(const volatile u8 __force *) addr; } +#endif +#ifndef __raw_readw static inline u16 __raw_readw(const volatile void __iomem *addr) { return *(const volatile u16 __force *) addr; } +#endif +#ifndef __raw_readl static inline u32 __raw_readl(const volatile void __iomem *addr) { return *(const volatile u32 __force *) addr; } +#endif #define readb __raw_readb #define readw(addr) __le16_to_cpu(__raw_readw(addr)) #define readl(addr) __le32_to_cpu(__raw_readl(addr)) +#ifndef __raw_writeb static inline void __raw_writeb(u8 b, volatile void __iomem *addr) { *(volatile u8 __force *) addr = b; } +#endif +#ifndef __raw_writew static inline void __raw_writew(u16 b, volatile void __iomem *addr) { *(volatile u16 __force *) addr = b; } +#endif +#ifndef __raw_writel static inline void __raw_writel(u32 b, volatile void __iomem *addr) { *(volatile u32 __force *) addr = b; } +#endif #define writeb __raw_writeb #define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr) @@ -122,6 +136,7 @@ static inline void outl(u32 b, unsigned long addr) #define outw_p(x, addr) outw((x), (addr)) #define outl_p(x, addr) outl((x), (addr)) +#ifndef insb static inline void insb(unsigned long addr, void *buffer, int count) { if (count) { @@ -132,7 +147,9 @@ static inline void insb(unsigned long addr, void *buffer, int count) } while (--count); } } +#endif +#ifndef insw static inline void insw(unsigned long addr, void *buffer, int count) { if (count) { @@ -143,7 +160,9 @@ static inline void insw(unsigned long addr, void *buffer, int count) } while (--count); } } +#endif +#ifndef insl static inline void insl(unsigned long addr, void *buffer, int count) { if (count) { @@ -154,7 +173,9 @@ static inline void insl(unsigned long addr, void *buffer, int count) } while (--count); } } +#endif +#ifndef outsb static inline void outsb(unsigned long addr, const void *buffer, int count) { if (count) { @@ -164,7 +185,9 @@ static inline void outsb(unsigned long addr, const void *buffer, int count) } while (--count); } } +#endif +#ifndef outsw static inline void outsw(unsigned long addr, const void *buffer, int count) { if (count) { @@ -174,7 +197,9 @@ static inline void outsw(unsigned long addr, const void *buffer, int count) } while (--count); } } +#endif +#ifndef outsl static inline void outsl(unsigned long 
addr, const void *buffer, int count) { if (count) { @@ -184,15 +209,20 @@ static inline void outsl(unsigned long addr, const void *buffer, int count) } while (--count); } } +#endif #ifndef CONFIG_GENERIC_IOMAP #define ioread8(addr) readb(addr) #define ioread16(addr) readw(addr) +#define ioread16be(addr) be16_to_cpu(ioread16(addr)) #define ioread32(addr) readl(addr) +#define ioread32be(addr) be32_to_cpu(ioread32(addr)) #define iowrite8(v, addr) writeb((v), (addr)) #define iowrite16(v, addr) writew((v), (addr)) +#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr)) #define iowrite32(v, addr) writel((v), (addr)) +#define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr)) #define ioread8_rep(p, dst, count) \ insb((unsigned long) (p), (dst), (count)) diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h index a799e20a769..a3216655d65 100644 --- a/include/asm-generic/ioctls.h +++ b/include/asm-generic/ioctls.h @@ -62,13 +62,16 @@ #define TCSETSW2 _IOW('T', 0x2C, struct termios2) #define TCSETSF2 _IOW('T', 0x2D, struct termios2) #define TIOCGRS485 0x542E +#ifndef TIOCSRS485 #define TIOCSRS485 0x542F +#endif #define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ #define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ #define TCSETX 0x5433 #define TCSETXF 0x5434 #define TCSETXW 0x5435 +#define TIOCSIG _IOW('T', 0x36, int) /* pty: generate signal */ #define FIONCLEX 0x5450 #define FIOCLEX 0x5451 @@ -87,12 +90,10 @@ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ /* - * some architectures define FIOQSIZE as 0x545E, which is used for - * TIOCGHAYESESP on others + * Some arches already define FIOQSIZE due to a historical + * conflict with a Hayes modem-specific ioctl value. */ #ifndef FIOQSIZE -# define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ -# define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ # define FIOQSIZE 0x5460 #endif @@ -104,6 +105,7 @@ #define TIOCPKT_START 8 #define TIOCPKT_NOSTOP 16 #define TIOCPKT_DOSTOP 32 +#define TIOCPKT_IOCTL 64 #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h index 9aebf618275..1f40d0024cf 100644 --- a/include/asm-generic/irqflags.h +++ b/include/asm-generic/irqflags.h @@ -5,68 +5,62 @@ * All architectures should implement at least the first two functions, * usually inline assembly will be the best way. 
*/ -#ifndef RAW_IRQ_DISABLED -#define RAW_IRQ_DISABLED 0 -#define RAW_IRQ_ENABLED 1 +#ifndef ARCH_IRQ_DISABLED +#define ARCH_IRQ_DISABLED 0 +#define ARCH_IRQ_ENABLED 1 #endif /* read interrupt enabled status */ -#ifndef __raw_local_save_flags -unsigned long __raw_local_save_flags(void); +#ifndef arch_local_save_flags +unsigned long arch_local_save_flags(void); #endif /* set interrupt enabled status */ -#ifndef raw_local_irq_restore -void raw_local_irq_restore(unsigned long flags); +#ifndef arch_local_irq_restore +void arch_local_irq_restore(unsigned long flags); #endif /* get status and disable interrupts */ -#ifndef __raw_local_irq_save -static inline unsigned long __raw_local_irq_save(void) +#ifndef arch_local_irq_save +static inline unsigned long arch_local_irq_save(void) { unsigned long flags; - flags = __raw_local_save_flags(); - raw_local_irq_restore(RAW_IRQ_DISABLED); + flags = arch_local_save_flags(); + arch_local_irq_restore(ARCH_IRQ_DISABLED); return flags; } #endif /* test flags */ -#ifndef raw_irqs_disabled_flags -static inline int raw_irqs_disabled_flags(unsigned long flags) +#ifndef arch_irqs_disabled_flags +static inline int arch_irqs_disabled_flags(unsigned long flags) { - return flags == RAW_IRQ_DISABLED; + return flags == ARCH_IRQ_DISABLED; } #endif /* unconditionally enable interrupts */ -#ifndef raw_local_irq_enable -static inline void raw_local_irq_enable(void) +#ifndef arch_local_irq_enable +static inline void arch_local_irq_enable(void) { - raw_local_irq_restore(RAW_IRQ_ENABLED); + arch_local_irq_restore(ARCH_IRQ_ENABLED); } #endif /* unconditionally disable interrupts */ -#ifndef raw_local_irq_disable -static inline void raw_local_irq_disable(void) +#ifndef arch_local_irq_disable +static inline void arch_local_irq_disable(void) { - raw_local_irq_restore(RAW_IRQ_DISABLED); + arch_local_irq_restore(ARCH_IRQ_DISABLED); } #endif /* test hardware interrupt enable bit */ -#ifndef raw_irqs_disabled -static inline int raw_irqs_disabled(void) +#ifndef arch_irqs_disabled +static inline int arch_irqs_disabled(void) { - return raw_irqs_disabled_flags(__raw_local_save_flags()); + return arch_irqs_disabled_flags(arch_local_save_flags()); } #endif -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - #endif /* __ASM_GENERIC_IRQFLAGS_H */ diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h index 11e57b6a85f..d1814497bcd 100644 --- a/include/asm-generic/kdebug.h +++ b/include/asm-generic/kdebug.h @@ -3,7 +3,7 @@ enum die_val { DIE_UNUSED, - DIE_OOPS=1 + DIE_OOPS = 1, }; #endif /* _ASM_GENERIC_KDEBUG_H */ diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h new file mode 100644 index 00000000000..02ac760c1a8 --- /dev/null +++ b/include/asm-generic/local64.h @@ -0,0 +1,96 @@ +#ifndef _ASM_GENERIC_LOCAL64_H +#define _ASM_GENERIC_LOCAL64_H + +#include <linux/percpu.h> +#include <asm/types.h> + +/* + * A signed long type for operations which are atomic for a single CPU. + * Usually used in combination with per-cpu variables. + * + * This is the default implementation, which uses atomic64_t. Which is + * rather pointless. The whole point behind local64_t is that some processors + * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs + * running on this CPU. local64_t allows exploitation of such capabilities. + */ + +/* Implement in terms of atomics. 
*/ + +#if BITS_PER_LONG == 64 + +#include <asm/local.h> + +typedef struct { + local_t a; +} local64_t; + +#define LOCAL64_INIT(i) { LOCAL_INIT(i) } + +#define local64_read(l) local_read(&(l)->a) +#define local64_set(l,i) local_set((&(l)->a),(i)) +#define local64_inc(l) local_inc(&(l)->a) +#define local64_dec(l) local_dec(&(l)->a) +#define local64_add(i,l) local_add((i),(&(l)->a)) +#define local64_sub(i,l) local_sub((i),(&(l)->a)) + +#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a)) +#define local64_dec_and_test(l) local_dec_and_test(&(l)->a) +#define local64_inc_and_test(l) local_inc_and_test(&(l)->a) +#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a)) +#define local64_add_return(i, l) local_add_return((i), (&(l)->a)) +#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a)) +#define local64_inc_return(l) local_inc_return(&(l)->a) + +#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n)) +#define local64_xchg(l, n) local_xchg((&(l)->a), (n)) +#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u)) +#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. */ +#define __local64_inc(l) local64_set((l), local64_read(l) + 1) +#define __local64_dec(l) local64_set((l), local64_read(l) - 1) +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) + +#else /* BITS_PER_LONG != 64 */ + +#include <asm/atomic.h> + +/* Don't use typedef: don't want them to be mixed with atomic_t's. */ +typedef struct { + atomic64_t a; +} local64_t; + +#define LOCAL64_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local64_read(l) atomic64_read(&(l)->a) +#define local64_set(l,i) atomic64_set((&(l)->a),(i)) +#define local64_inc(l) atomic64_inc(&(l)->a) +#define local64_dec(l) atomic64_dec(&(l)->a) +#define local64_add(i,l) atomic64_add((i),(&(l)->a)) +#define local64_sub(i,l) atomic64_sub((i),(&(l)->a)) + +#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a)) +#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a) +#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a) +#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a)) +#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a)) +#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a)) +#define local64_inc_return(l) atomic64_inc_return(&(l)->a) + +#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n)) +#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n)) +#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u)) +#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. 
*/ +#define __local64_inc(l) local64_set((l), local64_read(l) + 1) +#define __local64_dec(l) local64_set((l), local64_read(l) - 1) +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) + +#endif /* BITS_PER_LONG != 64 */ + +#endif /* _ASM_GENERIC_LOCAL64_H */ diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index b5043a9890d..d17784ea37f 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -55,14 +55,18 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; */ #define per_cpu(var, cpu) \ (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu))) -#define __get_cpu_var(var) \ - (*SHIFT_PERCPU_PTR(&(var), my_cpu_offset)) -#define __raw_get_cpu_var(var) \ - (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset)) -#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) +#ifndef __this_cpu_ptr #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) +#endif +#ifdef CONFIG_DEBUG_PREEMPT +#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) +#else +#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr) +#endif +#define __get_cpu_var(var) (*this_cpu_ptr(&(var))) +#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var))) #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void setup_per_cpu_areas(void); @@ -70,11 +74,16 @@ extern void setup_per_cpu_areas(void); #else /* ! SMP */ -#define per_cpu(var, cpu) (*((void)(cpu), &(var))) -#define __get_cpu_var(var) (var) -#define __raw_get_cpu_var(var) (var) -#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) -#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) +#define VERIFY_PERCPU_PTR(__p) ({ \ + __verify_pcpu_ptr((__p)); \ + (typeof(*(__p)) __kernel __force *)(__p); \ +}) + +#define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var)))) +#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) +#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) +#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) +#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) #endif /* SMP */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index e2bd73e8f9c..f4d4120e512 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres #define move_pte(pte, prot, old_addr, new_addr) (pte) #endif +#ifndef flush_tlb_fix_spurious_fault +#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) +#endif + #ifndef pgprot_noncached #define pgprot_noncached(prot) (prot) #endif diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h index 3b4fb3e52f0..0fd28e028de 100644 --- a/include/asm-generic/statfs.h +++ b/include/asm-generic/statfs.h @@ -33,7 +33,8 @@ struct statfs { __kernel_fsid_t f_fsid; __statfs_word f_namelen; __statfs_word f_frsize; - __statfs_word f_spare[5]; + __statfs_word f_flags; + __statfs_word f_spare[4]; }; /* @@ -55,7 +56,8 @@ struct statfs64 { __kernel_fsid_t f_fsid; __statfs_word f_namelen; __statfs_word f_frsize; - __statfs_word f_spare[5]; + __statfs_word f_flags; + __statfs_word f_spare[4]; } ARCH_PACK_STATFS64; /* @@ -77,7 +79,8 @@ struct compat_statfs64 { __kernel_fsid_t f_fsid; __u32 f_namelen; __u32 f_frsize; - __u32 f_spare[5]; + __u32 f_flags; + __u32 f_spare[4]; } ARCH_PACK_COMPAT_STATFS64; #endif diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index df84e3b0455..d89dec864d4 100644 --- a/include/asm-generic/syscalls.h +++ 
b/include/asm-generic/syscalls.h @@ -23,8 +23,10 @@ asmlinkage long sys_vfork(struct pt_regs *regs); #endif #ifndef sys_execve -asmlinkage long sys_execve(char __user *filename, char __user * __user *argv, - char __user * __user *envp, struct pt_regs *regs); +asmlinkage long sys_execve(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp, + struct pt_regs *regs); #endif #ifndef sys_mmap2 diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h index efa403b5e12..4b0b9cbbfae 100644 --- a/include/asm-generic/system.h +++ b/include/asm-generic/system.h @@ -21,6 +21,7 @@ #include <linux/irqflags.h> #include <asm/cmpxchg-local.h> +#include <asm/cmpxchg.h> struct task_struct; @@ -136,25 +137,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size) #define xchg(ptr, x) \ ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) -static inline unsigned long __cmpxchg(volatile unsigned long *m, - unsigned long old, unsigned long new) -{ - unsigned long retval; - unsigned long flags; - - local_irq_save(flags); - retval = *m; - if (retval == old) - *m = new; - local_irq_restore(flags); - return retval; -} - -#define cmpxchg(ptr, o, n) \ - ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ - (unsigned long)(o), \ - (unsigned long)(n))) - #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-generic/termbits.h b/include/asm-generic/termbits.h index 1c9773d48cb..232b4781aef 100644 --- a/include/asm-generic/termbits.h +++ b/include/asm-generic/termbits.h @@ -178,6 +178,7 @@ struct ktermios { #define FLUSHO 0010000 #define PENDIN 0040000 #define IEXTEN 0100000 +#define EXTPROC 0200000 /* tcflow() and TCXONC use these */ #define TCOOFF 0 diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index fd60700503c..fc824e2828f 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -5,7 +5,7 @@ * * Copyright (C) 2002, IBM Corp. * - * All rights reserved. + * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -34,9 +34,16 @@ #ifndef cpu_to_node #define cpu_to_node(cpu) ((void)(cpu),0) #endif +#ifndef set_numa_node +#define set_numa_node(node) +#endif +#ifndef set_cpu_numa_node +#define set_cpu_numa_node(cpu, node) +#endif #ifndef cpu_to_mem #define cpu_to_mem(cpu) ((void)(cpu),0) #endif + #ifndef parent_node #define parent_node(node) ((void)(node),0) #endif @@ -55,4 +62,15 @@ #endif /* CONFIG_NUMA */ +#if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES) + +#ifndef set_numa_mem +#define set_numa_mem(node) +#endif +#ifndef set_cpu_numa_mem +#define set_cpu_numa_mem(cpu, node) +#endif + +#endif /* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */ + #endif /* _ASM_GENERIC_TOPOLOGY_H */ diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index 6a0b30f78a6..b969770196c 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -18,7 +18,7 @@ #define __SYSCALL(x, y) #endif -#if __BITS_PER_LONG == 32 +#if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT) #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32) #else #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) @@ -241,8 +241,13 @@ __SYSCALL(__NR_sync, sys_sync) __SYSCALL(__NR_fsync, sys_fsync) #define __NR_fdatasync 83 __SYSCALL(__NR_fdatasync, sys_fdatasync) +#ifdef __ARCH_WANT_SYNC_FILE_RANGE2 +#define __NR_sync_file_range2 84 +__SYSCALL(__NR_sync_file_range2, sys_sync_file_range2) +#else #define __NR_sync_file_range 84 -__SYSCALL(__NR_sync_file_range, sys_sync_file_range) /* .long sys_sync_file_range2, */ +__SYSCALL(__NR_sync_file_range, sys_sync_file_range) +#endif /* fs/timerfd.c */ #define __NR_timerfd_create 85 @@ -580,7 +585,7 @@ __SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */ __SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) /* mm/fadvise.c */ #define __NR3264_fadvise64 223 -__SC_3264(__NR3264_fadvise64, sys_fadvise64_64, sys_fadvise64) +__SYSCALL(__NR3264_fadvise64, sys_fadvise64_64) /* mm/, CONFIG_MMU only */ #ifndef __ARCH_NOMMU @@ -627,8 +632,23 @@ __SYSCALL(__NR_accept4, sys_accept4) #define __NR_recvmmsg 243 __SYSCALL(__NR_recvmmsg, sys_recvmmsg) +/* + * Architectures may provide up to 16 syscalls of their own + * starting with this value. 
+ */ +#define __NR_arch_specific_syscall 244 + +#define __NR_wait4 260 +__SYSCALL(__NR_wait4, sys_wait4) +#define __NR_prlimit64 261 +__SYSCALL(__NR_prlimit64, sys_prlimit64) +#define __NR_fanotify_init 262 +__SYSCALL(__NR_fanotify_init, sys_fanotify_init) +#define __NR_fanotify_mark 263 +__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) + #undef __NR_syscalls -#define __NR_syscalls 244 +#define __NR_syscalls 264 /* * All syscalls below here should go away really, @@ -694,7 +714,8 @@ __SYSCALL(__NR_signalfd, sys_signalfd) #define __NR_syscalls (__NR_signalfd+1) #endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */ -#if __BITS_PER_LONG == 32 && defined(__ARCH_WANT_SYSCALL_OFF_T) +#if (__BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)) && \ + defined(__ARCH_WANT_SYSCALL_OFF_T) #define __NR_sendfile 1046 __SYSCALL(__NR_sendfile, sys_sendfile) #define __NR_ftruncate 1047 @@ -740,6 +761,7 @@ __SYSCALL(__NR_getpgrp, sys_getpgrp) __SYSCALL(__NR_pause, sys_pause) #define __NR_time 1062 #define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_COMPAT_SYS_TIME __SYSCALL(__NR_time, sys_time) #define __NR_utime 1063 #define __ARCH_WANT_SYS_UTIME @@ -763,8 +785,8 @@ __SYSCALL(__NR_epoll_wait, sys_epoll_wait) __SYSCALL(__NR_ustat, sys_ustat) #define __NR_vfork 1071 __SYSCALL(__NR_vfork, sys_vfork) -#define __NR_wait4 1072 -__SYSCALL(__NR_wait4, sys_wait4) +#define __NR_oldwait4 1072 +__SYSCALL(__NR_oldwait4, sys_wait4) #define __NR_recv 1073 __SYSCALL(__NR_recv, sys_recv) #define __NR_send 1074 @@ -801,7 +823,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall) * Here we map the numbers so that both versions * use the same syscall table layout. */ -#if __BITS_PER_LONG == 64 +#if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT) #define __NR_fcntl __NR3264_fcntl #define __NR_statfs __NR3264_statfs #define __NR_fstatfs __NR3264_fstatfs @@ -848,6 +870,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall) #endif #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND +#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND /* * "Conditional" syscalls diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 4e7ae600205..f4229fb315e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -156,10 +156,6 @@ CPU_KEEP(exit.data) \ MEM_KEEP(init.data) \ MEM_KEEP(exit.data) \ - . = ALIGN(8); \ - VMLINUX_SYMBOL(__start___markers) = .; \ - *(__markers) \ - VMLINUX_SYMBOL(__stop___markers) = .; \ . = ALIGN(32); \ VMLINUX_SYMBOL(__start___tracepoints) = .; \ *(__tracepoints) \ @@ -224,6 +220,8 @@ \ BUG_TABLE \ \ + JUMP_TABLE \ + \ /* PCI quirks */ \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ @@ -567,6 +565,14 @@ #define BUG_TABLE #endif +#define JUMP_TABLE \ + . = ALIGN(8); \ + __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___jump_table) = .; \ + *(__jump_table) \ + VMLINUX_SYMBOL(__stop___jump_table) = .; \ + } + #ifdef CONFIG_PM_TRACE #define TRACEDATA \ . = ALIGN(4); \ @@ -681,7 +687,9 @@ - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__per_cpu_start) = .; \ *(.data..percpu..first) \ + . = ALIGN(PAGE_SIZE); \ *(.data..percpu..page_aligned) \ + *(.data..percpu..readmostly) \ *(.data..percpu) \ *(.data..percpu..shared_aligned) \ VMLINUX_SYMBOL(__per_cpu_end) = .; \ @@ -707,7 +715,9 @@ VMLINUX_SYMBOL(__per_cpu_load) = .; \ VMLINUX_SYMBOL(__per_cpu_start) = .; \ *(.data..percpu..first) \ + . 
= ALIGN(PAGE_SIZE); \ *(.data..percpu..page_aligned) \ + *(.data..percpu..readmostly) \ *(.data..percpu) \ *(.data..percpu..shared_aligned) \ VMLINUX_SYMBOL(__per_cpu_end) = .; \ |
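Finally, a sketch of the override pattern that the new #ifndef guards in asm-generic/io.h (including the mmiowb guard) are meant to enable: an architecture header can now provide selected accessors itself and still reuse the generic definitions for everything else. The file name, function name, and barrier placement below are assumptions for illustration, not taken from any real architecture:

	/* arch/foo/include/asm/io.h -- hypothetical architecture header */
	#ifndef _ASM_FOO_IO_H
	#define _ASM_FOO_IO_H

	#include <linux/types.h>
	#include <linux/compiler.h>

	/* Arch-specific 32-bit MMIO read; assumed to want a compiler barrier. */
	static inline u32 foo_raw_readl(const volatile void __iomem *addr)
	{
		u32 val = *(const volatile u32 __force *)addr;
		barrier();
		return val;
	}
	/* Defining the macro makes the "#ifndef __raw_readl" block in
	 * asm-generic/io.h skip its default implementation, while readl()
	 * and the ioread helpers keep building on this override. */
	#define __raw_readl foo_raw_readl

	/* Everything not overridden (readb, readw, insb/outsb, the new
	 * ioread*be helpers, ...) still comes from the generic header. */
	#include <asm-generic/io.h>

	#endif /* _ASM_FOO_IO_H */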