Diffstat (limited to 'arch/sparc/include')
-rw-r--r-- | arch/sparc/include/asm/asm-offsets.h | 1
-rw-r--r-- | arch/sparc/include/asm/elf_32.h | 2
-rw-r--r-- | arch/sparc/include/asm/elf_64.h | 14
-rw-r--r-- | arch/sparc/include/asm/fcntl.h | 19
-rw-r--r-- | arch/sparc/include/asm/io_32.h | 2
-rw-r--r-- | arch/sparc/include/asm/page_32.h | 2
-rw-r--r-- | arch/sparc/include/asm/param.h | 19
-rw-r--r-- | arch/sparc/include/asm/pci_64.h | 2
-rw-r--r-- | arch/sparc/include/asm/spinlock_32.h | 62
-rw-r--r-- | arch/sparc/include/asm/spinlock_64.h | 54
-rw-r--r-- | arch/sparc/include/asm/spinlock_types.h | 8
-rw-r--r-- | arch/sparc/include/asm/string_32.h | 78
-rw-r--r-- | arch/sparc/include/asm/string_64.h | 25
-rw-r--r-- | arch/sparc/include/asm/thread_info_64.h | 6
-rw-r--r-- | arch/sparc/include/asm/timex_32.h | 1
-rw-r--r-- | arch/sparc/include/asm/topology_64.h | 4
-rw-r--r-- | arch/sparc/include/asm/uaccess_32.h | 15
-rw-r--r-- | arch/sparc/include/asm/uaccess_64.h | 23
-rw-r--r-- | arch/sparc/include/asm/unistd.h | 2
19 files changed, 132 insertions, 207 deletions
diff --git a/arch/sparc/include/asm/asm-offsets.h b/arch/sparc/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/sparc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
index 381a1b5256d..4269ca6ad18 100644
--- a/arch/sparc/include/asm/elf_32.h
+++ b/arch/sparc/include/asm/elf_32.h
@@ -104,8 +104,6 @@ typedef struct {
 #define ELF_CLASS ELFCLASS32
 #define ELF_DATA ELFDATA2MSB
 
-#define USE_ELF_CORE_DUMP
-
 #define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index d42e393078c..e67880381b8 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -152,7 +152,6 @@ typedef struct {
 			  (x)->e_machine == EM_SPARC32PLUS)
 #define compat_start_thread start_thread32
 
-#define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE PAGE_SIZE
 
 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical
@@ -196,17 +195,10 @@ static inline unsigned int sparc64_elf_hwcap(void)
 #define ELF_PLATFORM (NULL)
 
 #define SET_PERSONALITY(ex) \
-do {	unsigned long new_flags = current_thread_info()->flags; \
-	new_flags &= _TIF_32BIT; \
-	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
-		new_flags |= _TIF_32BIT; \
+do {	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+		set_thread_flag(TIF_32BIT); \
 	else \
-		new_flags &= ~_TIF_32BIT; \
-	if ((current_thread_info()->flags & _TIF_32BIT) \
-	    != new_flags) \
-		set_thread_flag(TIF_ABI_PENDING); \
-	else \
-		clear_thread_flag(TIF_ABI_PENDING); \
+		clear_thread_flag(TIF_32BIT); \
 	/* flush_thread will update pgd cache */ \
 	if (personality(current->personality) != PER_LINUX32) \
 		set_personality(PER_LINUX | \
diff --git a/arch/sparc/include/asm/fcntl.h b/arch/sparc/include/asm/fcntl.h
index d4d9c9d852c..38f37b333cc 100644
--- a/arch/sparc/include/asm/fcntl.h
+++ b/arch/sparc/include/asm/fcntl.h
@@ -1,14 +1,12 @@
 #ifndef _SPARC_FCNTL_H
 #define _SPARC_FCNTL_H
 
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
-   located on an ext2 file system */
 #define O_APPEND 0x0008
 #define FASYNC 0x0040 /* fcntl, for BSD compatibility */
 #define O_CREAT 0x0200 /* not fcntl */
 #define O_TRUNC 0x0400 /* not fcntl */
 #define O_EXCL 0x0800 /* not fcntl */
-#define O_SYNC 0x2000
+#define O_DSYNC 0x2000 /* used to be O_SYNC, see below */
 #define O_NONBLOCK 0x4000
 #if defined(__sparc__) && defined(__arch64__)
 #define O_NDELAY 0x0004
@@ -20,6 +18,21 @@
 #define O_DIRECT 0x100000 /* direct disk access hint */
 #define O_NOATIME 0x200000
 #define O_CLOEXEC 0x400000
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#define __O_SYNC 0x800000
+#define O_SYNC (__O_SYNC|O_DSYNC)
 
 #define F_GETOWN 5 /* for sockets. */
 #define F_SETOWN 6 /* for sockets. */
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index 93fe21e02c8..679c7504625 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -8,7 +8,7 @@
 #include <asm/page.h> /* IO address mapping routines need this */
 #include <asm/system.h>
 
-#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
 
 static inline u32 flip_dword (u32 l)
 {
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index f72080bdda9..156707b0f18 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -143,7 +143,7 @@ extern unsigned long pfn_base;
 #define phys_to_virt __va
 
 #define ARCH_PFN_OFFSET (pfn_base)
-#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 
 #define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
 #define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
diff --git a/arch/sparc/include/asm/param.h b/arch/sparc/include/asm/param.h
index 9836d9a3cb9..0bc356bf8c5 100644
--- a/arch/sparc/include/asm/param.h
+++ b/arch/sparc/include/asm/param.h
@@ -1,22 +1,7 @@
 #ifndef _ASMSPARC_PARAM_H
 #define _ASMSPARC_PARAM_H
 
-#ifdef __KERNEL__
-# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ)
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
 #define EXEC_PAGESIZE 8192 /* Thanks for sun4's we carry baggage... */
+#include <asm-generic/param.h>
 
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#endif
+#endif /* _ASMSPARC_PARAM_H */
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index b63e51c3c3e..b0576df6ec8 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -16,8 +16,6 @@
 #define PCI_IRQ_NONE 0xffffffff
 
-#define PCI_CACHE_LINE_BYTES 64
-
 static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff63..7f9b9dba38a 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
 #include <asm/psr.h>
 
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 	"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "g2", "memory", "cc");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int result;
 	__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("stb %%g0, [%0]"
 	: : "r" (lock) : "memory");
 }
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * Sort of like atomic_t's on Sparc, but even more clever.
  *
  * ------------------------------------
- * | 24-bit counter | wlock | raw_rwlock_t
+ * | 24-bit counter | wlock | arch_rwlock_t
  * ------------------------------------
  *  31            8 7       0
  *
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov %%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_lock(lock); \
+	__arch_read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov %%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_unlock(lock); \
+	__arch_read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	*(volatile __u32 *)&lp->lock = ~0U;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val;
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (val == 0);
 }
 
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	register int res asm("o0");
 	lp = rw;
 	__asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
 	return res;
 }
 
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
({	unsigned long flags; \
 	int res; \
 	local_irq_save(flags); \
-	res = arch_read_trylock(lock); \
+	res = __arch_read_trylock(lock); \
 	local_irq_restore(flags); \
 	res; \
})
 
-#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e51478358..073936a8b27 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
 * the spinner sections must be pre-V9 branches.
 */
 
-#define __raw_spin_is_locked(lp) ((lp)->lock != 0)
+#define arch_spin_is_locked(lp) ((lp)->lock != 0)
 
-#define __raw_spin_unlock_wait(lp) \
+#define arch_spin_unlock_wait(lp) \
 	do {	rmb(); \
 	} while((lp)->lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long result;
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0UL);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stb %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long tmp1, tmp2;
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
 {
 	int tmp1, tmp2;
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
 	return tmp1;
 }
 
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
 	return result;
 }
 
-#define __raw_read_lock(p) arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p) arch_read_trylock(p)
-#define __raw_read_unlock(p) arch_read_unlock(p)
-#define __raw_write_lock(p) arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p) arch_write_unlock(p)
-#define __raw_write_trylock(p) arch_write_trylock(p)
-
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
-
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_read_lock(p) arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p) arch_read_trylock(p)
+#define arch_read_unlock(p) arch_read_unlock(p)
+#define arch_write_lock(p) arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p) arch_write_unlock(p)
+#define arch_write_trylock(p) arch_write_trylock(p)
+
+#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw) (!(rw)->lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585..9c454fdeaad 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
 typedef struct {
 	volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
 
 #endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 6c5fddb7e6b..edf196ee4ef 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -16,8 +16,6 @@
 #ifdef __KERNEL__
 
 extern void __memmove(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memset(void *,int,__kernel_size_t);
 
 #ifndef EXPORT_SYMTAB_STROPS
@@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t);
 })
 
 #define __HAVE_ARCH_MEMCPY
-
-static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
-	extern void __copy_1page(void *, const void *);
-
-	if(n <= 32) {
-		__builtin_memcpy(to, from, n);
-	} else if (((unsigned int) to & 7) != 0) {
-		/* Destination is not aligned on the double-word boundary */
-		__memcpy(to, from, n);
-	} else {
-		switch(n) {
-		case PAGE_SIZE:
-			__copy_1page(to, from);
-			break;
-		default:
-			__memcpy(to, from, n);
-			break;
-		}
-	}
-	return to;
-}
-
-static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
-	__memcpy(to, from, n);
-	return to;
-}
-
-#undef memcpy
-#define memcpy(t, f, n) \
-(__builtin_constant_p(n) ? \
- __constant_memcpy((t),(f),(n)) : \
- __nonconstant_memcpy((t),(f),(n)))
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
 
 #define __HAVE_ARCH_MEMSET
-
-static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
-{
-	extern void bzero_1page(void *);
-	extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
-	if(!c) {
-		if(count == PAGE_SIZE)
-			bzero_1page(s);
-		else
-			__bzero(s, count);
-	} else {
-		__memset(s, c, count);
-	}
-	return s;
-}
-
-static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
-{
-	extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
-	if(!c)
-		__bzero(s, count);
-	else
-		__memset(s, c, count);
-	return s;
-}
-
-static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
-{
-	__memset(s, c, count);
-	return s;
-}
-
-#undef memset
-#define memset(s, c, count) \
-(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
-	__constant_c_and_count_memset((s), (c), (count)) : \
-	__constant_c_memset((s), (c), (count))) \
-	: __nonconstant_memset((s), (c), (count)))
+#define memset(s, c, count) __builtin_memset(s, c, count)
 
 #define __HAVE_ARCH_MEMSCAN
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 43161f2d17e..9623bc21315 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -15,8 +15,6 @@
 #include <asm/asi.h>
 
-extern void *__memset(void *,int,__kernel_size_t);
-
 #ifndef EXPORT_SYMTAB_STROPS
 
 /* First the mem*() things. */
@@ -24,29 +22,10 @@
 extern void *memmove(void *, const void *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCPY
-extern void *memcpy(void *, const void *, __kernel_size_t);
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
 
 #define __HAVE_ARCH_MEMSET
-extern void *__builtin_memset(void *,int,__kernel_size_t);
-
-static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
-{
-	extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
-	if (!c) {
-		__bzero(s, count);
-		return s;
-	} else
-		return __memset(s, c, count);
-}
-
-#undef memset
-#define memset(s, c, count) \
-((__builtin_constant_p(count) && (count) <= 32) ? \
- __builtin_memset((s), (c), (count)) : \
- (__builtin_constant_p(c) ? \
-  __constant_memset((s), (c), (count)) : \
-  __memset((s), (c), (count))))
+#define memset(s, c, count) __builtin_memset(s, c, count)
 
 #define __HAVE_ARCH_MEMSCAN
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 1b45a7bbe40..39be9f256e5 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -227,12 +227,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
 /* flag bit 8 is available */
 #define TIF_SECCOMP 9 /* secure computing */
 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
-/* flag bit 11 is available */
+#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
 /* NOTE: Thread flags >= 12 should be ones we have no interest
 *       in using in assembly, else we can't use the mask as
 *       an immediate value in instructions such as andcc.
 */
-#define TIF_ABI_PENDING 12
+/* flag bit 12 is available */
 #define TIF_MEMDIE 13
 #define TIF_POLLING_NRFLAG 14
 #define TIF_FREEZE 15 /* is freezing for suspend */
@@ -246,7 +246,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_32BIT (1<<TIF_32BIT)
 #define _TIF_SECCOMP (1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
-#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE (1<<TIF_FREEZE)
diff --git a/arch/sparc/include/asm/timex_32.h b/arch/sparc/include/asm/timex_32.h
index b6ccdb0d6f7..a254750e4c0 100644
--- a/arch/sparc/include/asm/timex_32.h
+++ b/arch/sparc/include/asm/timex_32.h
@@ -12,4 +12,5 @@ typedef unsigned long cycles_t;
 #define get_cycles() (0)
 
+extern u32 (*do_arch_gettimeoffset)(void);
 #endif
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 600a79035fa..1c79f32734a 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -12,7 +12,9 @@ static inline int cpu_to_node(int cpu)
 #define parent_node(node) (node)
 
-#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
+#define cpumask_of_node(node) ((node) == -1 ? \
+			       cpu_all_mask : \
+			       &numa_cpumask_lookup_table[node])
 
 struct pci_bus;
 #ifdef CONFIG_PCI
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 8303ac48103..25f1d10155e 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
 	return __copy_user(to, (__force void __user *) from, n);
 }
 
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+	__compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+	__compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	int sz = __compiletime_object_size(to);
+
+	if (unlikely(sz != -1 && sz < n)) {
+		copy_from_user_overflow();
+		return n;
+	}
+
 	if (n && __access_ok((unsigned long) from, n))
 		return __copy_user((__force void __user *) to, from, n);
 	else
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 9ea271e19c7..2406788bfe5 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -6,6 +6,7 @@
 */
 
 #ifdef __KERNEL__
+#include <linux/errno.h>
 #include <linux/compiler.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
@@ -204,6 +205,14 @@ __asm__ __volatile__( \
 extern int __get_user_bad(void);
 
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+	__compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+	__compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
 extern unsigned long __must_check ___copy_from_user(void *to,
 						    const void __user *from,
 						    unsigned long size);
@@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_from_user(to, from, size);
-
-	if (unlikely(ret))
-		ret = copy_from_user_fixup(to, from, size);
+	int sz = __compiletime_object_size(to);
+	unsigned long ret = size;
+
+	if (likely(sz == -1 || sz >= size)) {
+		ret = ___copy_from_user(to, from, size);
+		if (unlikely(ret))
+			ret = copy_from_user_fixup(to, from, size);
+	} else {
+		copy_from_user_overflow();
+	}
 
 	return ret;
 }
 #define __copy_from_user copy_from_user
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index d8d25bd9712..cb4b9bfd0d8 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -398,7 +398,7 @@
 #define __NR_perf_event_open 327
 #define __NR_recvmmsg 328
 
-#define NR_SYSCALLS 329
+#define NR_syscalls 329
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
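The fcntl.h hunk above redefines O_SYNC as (__O_SYNC|O_DSYNC), so, as its comment notes, code that does not care whether O_DSYNC or full O_SYNC was requested can simply test the O_DSYNC bit. A minimal user-space sketch of that property follows; the flag values are the sparc ones from the hunk, while the helper and its use are illustrative only and not part of the patch.

#include <stdio.h>

/* sparc flag values taken from the fcntl.h hunk above */
#define O_DSYNC		0x2000
#define __O_SYNC	0x800000
#define O_SYNC		(__O_SYNC | O_DSYNC)

/* Illustrative helper (not kernel code): true for O_DSYNC and also for
 * O_SYNC, because O_SYNC contains the O_DSYNC bit by construction. */
static int wants_data_sync(int flags)
{
	return (flags & O_DSYNC) != 0;
}

int main(void)
{
	printf("O_DSYNC -> %d, O_SYNC -> %d, none -> %d\n",
	       wants_data_sync(O_DSYNC),
	       wants_data_sync(O_SYNC),
	       wants_data_sync(0));
	return 0;
}

Compiled and run, this prints 1, 1, 0: both sync variants satisfy the single O_DSYNC test, which is exactly the side effect the comment in the hunk describes.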
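The uaccess_32.h and uaccess_64.h hunks guard copy_from_user() with a compile-time check of the destination object's size, warning (or erroring) via copy_from_user_overflow() when the buffer is provably too small. Below is a rough user-space sketch of the same pattern built directly on GCC's __builtin_object_size(); the kernel instead wraps this in __compiletime_object_size(), and the names checked_copy, report_overflow and CHECKED_COPY here are made up for illustration.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's copy_from_user_overflow(). */
static void report_overflow(void)
{
	fprintf(stderr, "copy: destination buffer is provably too small\n");
}

/* The pattern from the hunks: if the destination size is known at compile
 * time (__builtin_object_size() != (size_t)-1) and smaller than n, refuse
 * the copy and report every byte as uncopied, as copy_from_user() does. */
static size_t checked_copy(void *to, size_t to_sz, const void *from, size_t n)
{
	if (to_sz != (size_t)-1 && to_sz < n) {
		report_overflow();
		return n;
	}
	memcpy(to, from, n);
	return 0;
}

#define CHECKED_COPY(to, from, n) \
	checked_copy((to), __builtin_object_size((to), 0), (from), (n))

int main(void)
{
	char small[8];
	const char src[16] = "0123456789abcde";

	/* Provably too large: the compiler sees sizeof(small) == 8 < 16. */
	printf("uncopied bytes: %zu\n", CHECKED_COPY(small, src, sizeof(src)));
	return 0;
}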