Diffstat (limited to 'arch/microblaze')
35 files changed, 422 insertions, 460 deletions
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 203ec61c6d4..76818f92653 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -75,9 +75,6 @@ config LOCKDEP_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
 
-config PCI
-	def_bool n
-
 config DTC
 	def_bool y
 
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 836832dd9b2..72f6e858374 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -84,7 +84,7 @@ define archhelp
 	echo '* linux.bin - Create raw binary'
 	echo ' linux.bin.gz - Create compressed raw binary'
 	echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
-	echo ' - stripped elf with fdt blob
+	echo ' - stripped elf with fdt blob'
 	echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
 	echo ' *_defconfig - Select default config from arch/microblaze/configs'
 	echo ''
@@ -94,3 +94,5 @@ define archhelp
 	echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
 	echo ' (minus the .dts extension).'
 endef
+
+MRPROPER_FILES += $(boot)/simpleImage.*
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 902cf9846c3..57f50c2371c 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -23,8 +23,6 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
 endif
 
 $(obj)/linux.bin: vmlinux FORCE
-	[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
-	touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
 	$(call if_changed,objcopy)
 	$(call if_changed,uimage)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -62,6 +60,4 @@ quiet_cmd_dtc = DTC $@
 $(obj)/%.dtb: $(dtstree)/%.dts FORCE
 	$(call if_changed,dtc)
 
-clean-kernel += linux.bin linux.bin.gz simpleImage.*
-
-clean-files += *.dtb simpleImage.*.unstrip
+clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 8dbb6e7a03a..ad3fd61b2fe 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -55,7 +55,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 		__futex_atomic_op("or %1,%0,%4;", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and %1,%0,%4;", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("andn %1,%0,%4;", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_XOR:
 		__futex_atomic_op("xor %1,%0,%4;", ret, oldval, uaddr, oparg);
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 32d621a56ae..e45a6eea92e 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -108,6 +108,11 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define iowrite16(v, addr)	__raw_writew((u16)(v), (u16 *)(addr))
 #define iowrite32(v, addr)	__raw_writel((u32)(v), (u32 *)(addr))
 
+#define ioread16be(addr)	__raw_readw((u16 *)(addr))
+#define ioread32be(addr)	__raw_readl((u32 *)(addr))
+#define iowrite16be(v, addr)	__raw_writew((u16)(v), (u16 *)(addr))
+#define iowrite32be(v, addr)	__raw_writel((u32)(v), (u32 *)(addr))
+
 /* These are the definitions for the x86 IO instructions
  * inb/inw/inl/outb/outw/outl, the "string" versions
  * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 563c6b9453f..8eeb09211ec 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -14,7 +14,6 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 #include <asm/registers.h>
-#include <asm/segment.h>
 #include <asm/entry.h>
 #include <asm/current.h>
 
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
deleted file mode 100644
index 0e7102c3fb1..00000000000
--- a/arch/microblaze/include/asm/segment.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2008-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SEGMENT_H
-#define _ASM_MICROBLAZE_SEGMENT_H
-
-# ifndef __ASSEMBLY__
-
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
-/*
- * On Microblaze the fs value is actually the top of the corresponding
- * address space.
- *
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
- */
-# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-# ifndef CONFIG_MMU
-# define KERNEL_DS	MAKE_MM_SEG(0)
-# define USER_DS	KERNEL_DS
-# else
-# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
-# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
-# endif
-
-# define get_ds()	(KERNEL_DS)
-# define get_fs()	(current_thread_info()->addr_limit)
-# define set_fs(val)	(current_thread_info()->addr_limit = (val))
-
-# define segment_eq(a, b)	((a).seg == (b).seg)
-
-# endif /* __ASSEMBLY__ */
-#endif /* _ASM_MICROBLAZE_SEGMENT_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 6e92885d381..b2ca80f6464 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -19,7 +19,6 @@
 #ifndef __ASSEMBLY__
 # include <linux/types.h>
 # include <asm/processor.h>
-# include <asm/segment.h>
 
 /*
  * low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@ struct cpu_context {
 	__u32	fsr;
 };
 
+typedef struct {
+	unsigned long	seg;
+} mm_segment_t;
+
 struct thread_info {
 	struct task_struct	*task; /* main task structure */
 	struct exec_domain	*exec_domain; /* execution domain */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index bcb8b41d55a..2e1353c2d18 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -24,6 +24,7 @@ extern void _tlbie(unsigned long address);
 extern void _tlbia(void);
 
 #define __tlbia()	{ preempt_disable(); _tlbia(); preempt_enable(); }
+#define __tlbie(x)	{ _tlbie(x); }
 
 static inline void local_flush_tlb_all(void)
 	{ __tlbia(); }
@@ -31,7 +32,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	{ __tlbia(); }
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
+	{ __tlbie(vmaddr); }
 static inline void local_flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 	{ __tlbia(); }
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 371bd6e56d9..446bec29b14 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -22,101 +22,73 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/segment.h>
 #include <linux/string.h>
 
 #define VERIFY_READ	0
 #define VERIFY_WRITE	1
 
-#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)
-
-#ifndef CONFIG_MMU
-
-extern int ___range_ok(unsigned long addr, unsigned long size);
-
-#define __range_ok(addr, size) \
-		___range_ok((unsigned long)(addr), (unsigned long)(size))
-
-#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
-#define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
-
-/* Undefined function to trigger linker error */
-extern int bad_user_access_length(void);
-
-/* FIXME this is function for optimalization -> memcpy */
-#define __get_user(var, ptr)				\
-({							\
-	int __gu_err = 0;				\
-	switch (sizeof(*(ptr))) {			\
-	case 1:						\
-	case 2:						\
-	case 4:						\
-		(var) = *(ptr);				\
-		break;					\
-	case 8:						\
-		memcpy((void *) &(var), (ptr), 8);	\
-		break;					\
-	default:					\
-		(var) = 0;				\
-		__gu_err = __get_user_bad();		\
-		break;					\
-	}						\
-	__gu_err;					\
-})
+/*
+ * On Microblaze the fs value is actually the top of the corresponding
+ * address space.
+ *
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
+ */
+# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
 
-#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
+# ifndef CONFIG_MMU
+# define KERNEL_DS	MAKE_MM_SEG(0)
+# define USER_DS	KERNEL_DS
+# else
+# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
+# endif
 
-/* FIXME is not there defined __pu_val */
-#define __put_user(var, ptr)					\
-({								\
-	int __pu_err = 0;					\
-	switch (sizeof(*(ptr))) {				\
-	case 1:							\
-	case 2:							\
-	case 4:							\
-		*(ptr) = (var);					\
-		break;						\
-	case 8: {						\
-		typeof(*(ptr)) __pu_val = (var);		\
-		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
-	}							\
-		break;						\
-	default:						\
-		__pu_err = __put_user_bad();			\
-		break;						\
-	}							\
-	__pu_err;						\
-})
+# define get_ds()	(KERNEL_DS)
+# define get_fs()	(current_thread_info()->addr_limit)
+# define set_fs(val)	(current_thread_info()->addr_limit = (val))
 
-#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))
+# define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define put_user(x, ptr)	__put_user((x), (ptr))
-#define get_user(x, ptr)	__get_user((x), (ptr))
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
 
-#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
-#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
 
-#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
-#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
-#define __copy_to_user_inatomic(to, from, n) \
-			(__copy_to_user((to), (from), (n)))
-#define __copy_from_user_inatomic(to, from, n) \
-			(__copy_from_user((to), (from), (n)))
+#ifndef CONFIG_MMU
 
-static inline unsigned long clear_user(void *addr, unsigned long size)
+/* Check against bounds of physical memory */
+static inline int ___range_ok(unsigned long addr, unsigned long size)
 {
-	if (access_ok(VERIFY_WRITE, addr, size))
-		size = __clear_user(addr, size);
-	return size;
+	return ((addr < memory_start) ||
+		((addr + size) > memory_end));
 }
 
-/* Returns 0 if exception not found and fixup otherwise. */
-extern unsigned long search_exception_table(unsigned long);
+#define __range_ok(addr, size) \
+		___range_ok((unsigned long)(addr), (unsigned long)(size))
 
-extern long strncpy_from_user(char *dst, const char *src, long count);
-extern long strnlen_user(const char *src, long count);
+#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
 
-#else /* CONFIG_MMU */
+#else
 
 /*
  * Address is valid if:
@@ -129,24 +101,88 @@ extern long strnlen_user(const char *src, long count);
 /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
  type?"WRITE":"READ",addr,size,get_fs().seg)) */
 
-/*
- * All the __XXX versions macros/functions below do not perform
- * access checking. It is assumed that the necessary checks have been
- * already performed before the finction (macro) is called.
- */
+#endif
 
-#define get_user(x, ptr) \
-({ \
-	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
-		? __get_user((x), (ptr)) : -EFAULT; \
-})
+#ifdef CONFIG_MMU
+# define __FIXUP_SECTION	".section .fixup,\"ax\"\n"
+# define __EX_TABLE_SECTION	".section __ex_table,\"a\"\n"
+#else
+# define __FIXUP_SECTION	".section .discard,\"ax\"\n"
+# define __EX_TABLE_SECTION	".section .discard,\"a\"\n"
+#endif
 
-#define put_user(x, ptr) \
-({ \
-	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
-		? __put_user((x), (ptr)) : -EFAULT; \
+extern unsigned long __copy_tofrom_user(void __user *to,
+		const void __user *from, unsigned long size);
+
+/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
+static inline unsigned long __must_check __clear_user(void __user *to,
+							unsigned long n)
+{
+	/* normal memset with two words to __ex_table */
+	__asm__ __volatile__ (					\
+			"1:	sb	r0, %2, r0;"		\
+			"	addik	%0, %0, -1;"		\
+			"	bneid	%0, 1b;"		\
+			"	addik	%2, %2, 1;"		\
+			"2:			"		\
+			__EX_TABLE_SECTION			\
+			".word	1b,2b;"				\
+			".previous;"				\
+		: "=r"(n)					\
+		: "0"(n), "r"(to)
+	);
+	return n;
+}
+
+static inline unsigned long __must_check clear_user(void __user *to,
+							unsigned long n)
+{
+	might_sleep();
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+
+	return __clear_user(to, n);
+}
+
+/* put_user and get_user macros */
+extern long __user_bad(void);
+
+#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
+({								\
+	__asm__ __volatile__ (					\
+			"1:" insn " %1, %2, r0;"		\
+			"	addk	%0, r0, r0;"		\
+			"2:			"		\
+			__FIXUP_SECTION				\
+			"3:	brid	2b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,3b;"				\
+			".previous;"				\
+		: "=&r"(__gu_err), "=r"(__gu_val)		\
+		: "r"(__gu_ptr), "i"(-EFAULT)			\
+	);							\
 })
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+
 #define __get_user(x, ptr)					\
 ({								\
 	unsigned long __gu_val;					\
@@ -163,30 +199,74 @@ extern long strnlen_user(const char *src, long count);
 		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
 		break;						\
 	default:						\
-		__gu_val = 0; __gu_err = -EINVAL;		\
+		/* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
 	}							\
 	x = (__typeof__(*(ptr))) __gu_val;			\
 	__gu_err;						\
 })
 
-#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
+
+#define get_user(x, ptr)					\
 ({								\
-	__asm__ __volatile__ (				\
-		"1:" insn	" %1, %2, r0;		\
-			addk	%0, r0, r0;		\
-		2:					\
-			.section .fixup,\"ax\";		\
-		3:	brid	2b;			\
-			addik	%0, r0, %3;		\
-			.previous;			\
-			.section __ex_table,\"a\";	\
-			.word	1b,3b;			\
-			.previous;"			\
-		: "=r"(__gu_err), "=r"(__gu_val)	\
-		: "r"(__gu_ptr), "i"(-EFAULT)		\
-	);						\
+	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))		\
+		? __get_user((x), (ptr)) : -EFAULT;		\
+})
+
+#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
+({								\
+	__asm__ __volatile__ (					\
+			"1:" insn " %1, %2, r0;"		\
+			"	addk	%0, r0, r0;"		\
+			"2:			"		\
+			__FIXUP_SECTION				\
+			"3:	brid	2b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,3b;"				\
+			".previous;"				\
+		: "=&r"(__gu_err)				\
+		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
+	);							\
 })
 
+#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)		\
+({								\
+	__asm__ __volatile__ ("	lwi	%0, %1, 0;"		\
+			"1:	swi	%0, %2, 0;"		\
+			"	lwi	%0, %1, 4;"		\
+			"2:	swi	%0, %2, 4;"		\
+			"	addk	%0, r0, r0;"		\
+			"3:			"		\
+			__FIXUP_SECTION				\
+			"4:	brid	3b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,4b,2b,4b;"			\
+			".previous;"				\
+		: "=&r"(__gu_err)				\
+		: "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
+	);							\
+})
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+
 #define __put_user(x, ptr)					\
 ({								\
 	__typeof__(*(ptr)) volatile __gu_val = (x);		\
@@ -195,7 +275,7 @@ extern long strnlen_user(const char *src, long count);
 	case 1:							\
 		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
 		break;						\
-	case 2:							\
+	case 2:							\
 		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
 		break;						\
 	case 4:							\
@@ -205,121 +285,82 @@ extern long strnlen_user(const char *src, long count);
 		__put_user_asm_8((ptr), __gu_val, __gu_err);	\
 		break;						\
 	default:						\
-		__gu_err = -EINVAL;				\
+		/*__gu_err = -EINVAL;*/ __gu_err = __user_bad();	\
 	}							\
 	__gu_err;						\
 })
 
-#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
-({							\
-__asm__ __volatile__ ("	lwi	%0, %1, 0;	\
-		1:	swi	%0, %2, 0;	\
-			lwi	%0, %1, 4;	\
-		2:	swi	%0, %2, 4;	\
-			addk	%0,r0,r0;	\
-		3:				\
-			.section .fixup,\"ax\";	\
-		4:	brid	3b;		\
-			addik	%0, r0, %3;	\
-			.previous;		\
-			.section __ex_table,\"a\";	\
-			.word	1b,4b,2b,4b;	\
-			.previous;"		\
-	: "=&r"(__gu_err)			\
-	: "r"(&__gu_val),			\
-	"r"(__gu_ptr), "i"(-EFAULT)		\
-	);					\
-})
+#ifndef CONFIG_MMU
 
-#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
-({								\
-	__asm__ __volatile__ (				\
-		"1:" insn	" %1, %2, r0;		\
-			addk	%0, r0, r0;		\
-		2:					\
-			.section .fixup,\"ax\";		\
-		3:	brid	2b;			\
-			addik	%0, r0, %3;		\
-			.previous;			\
-			.section __ex_table,\"a\";	\
-			.word	1b,3b;			\
-			.previous;"			\
-		: "=r"(__gu_err)			\
-		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
-	);						\
-})
+#define put_user(x, ptr)	__put_user((x), (ptr))
 
-/*
- * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
- */
-static inline int clear_user(char *to, int size)
-{
-	if (size && access_ok(VERIFY_WRITE, to, size)) {
-		__asm__ __volatile__ ("			\
-		1:					\
-			sb	r0, %2, r0;		\
-			addik	%0, %0, -1;		\
-			bneid	%0, 1b;			\
-			addik	%2, %2, 1;		\
-		2:					\
-			.section __ex_table,\"a\";	\
-			.word	1b,2b;			\
-			.section .text;"		\
-			: "=r"(size)			\
-			: "0"(size), "r"(to)
-		);
-	}
-	return size;
-}
+#else /* CONFIG_MMU */
 
-#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
+#define put_user(x, ptr)					\
+({								\
+	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))		\
+		? __put_user((x), (ptr)) : -EFAULT;		\
+})
+#endif /* CONFIG_MMU */
+
+/* copy_to_from_user */
+#define __copy_from_user(to, from, n)	\
+	__copy_tofrom_user((__force void __user *)(to), \
+				(void __user *)(from), (n))
 #define __copy_from_user_inatomic(to, from, n) \
 		copy_from_user((to), (from), (n))
 
-#define copy_to_user(to, from, n)					\
-	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
-		__copy_tofrom_user((void __user *)(to),			\
-			(__force const void __user *)(from), (n))	\
-		: -EFAULT)
+static inline long copy_from_user(void *to,
+		const void __user *from, unsigned long n)
+{
+	might_sleep();
+	if (access_ok(VERIFY_READ, from, n))
+		return __copy_from_user(to, from, n);
+	return n;
+}
 
-#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
+#define __copy_to_user(to, from, n)	\
+	__copy_tofrom_user((void __user *)(to), \
+			(__force const void __user *)(from), (n))
 #define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))
 
-#define copy_from_user(to, from, n)					\
-	(access_ok(VERIFY_READ, (from), (n)) ?				\
-		__copy_tofrom_user((__force void __user *)(to),		\
-			(void __user *)(from), (n))			\
-		: -EFAULT)
+static inline long copy_to_user(void __user *to,
+		const void *from, unsigned long n)
+{
+	might_sleep();
+	if (access_ok(VERIFY_WRITE, to, n))
+		return __copy_to_user(to, from, n);
+	return n;
+}
 
+/*
+ * Copy a null terminated string from userspace.
+ */
 extern int __strncpy_user(char *to, const char __user *from, int len);
-extern int __strnlen_user(const char __user *sstr, int len);
 
-#define strncpy_from_user(to, from, len)	\
-		(access_ok(VERIFY_READ, from, 1) ?	\
-			__strncpy_user(to, from, len) : -EFAULT)
-#define strnlen_user(str, len)	\
-		(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
+#define __strncpy_from_user __strncpy_user
 
-#endif /* CONFIG_MMU */
-
-extern unsigned long __copy_tofrom_user(void __user *to,
-		const void __user *from, unsigned long size);
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+	return __strncpy_from_user(dst, src, count);
+}
 
 /*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * Return the size of a string (including the ending 0)
  *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
+ * Return 0 on exception, a value greater than N if too long
  */
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
+extern int __strnlen_user(const char __user *sstr, int len);
+
+static inline long strnlen_user(const char __user *src, long n)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return 0;
+	return __strnlen_user(src, n);
+}
 
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 991d71311b0..255ef880351 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/slab.h>
 #include <asm/cpuinfo.h>
 #include <asm/pvr.h>
 
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index b1084974fcc..ce72dd4967c 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -8,6 +8,7 @@
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/gfp.h>
 #include <linux/dma-debug.h>
 #include <asm/bug.h>
 #include <asm/cacheflush.h>
@@ -37,7 +38,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-	if (dev)
+	if (likely(dev))
 		return (unsigned long)dev->archdata.dma_data;
 
 	return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index 388b31ca65a..515feb40455 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -151,13 +151,10 @@ int ftrace_make_nop(struct module *mod,
 	return ret;
 }
 
-static int ret_addr; /* initialized as 0 by default */
-
 /* I believe that first is called ftrace_make_nop before this function */
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
 	int ret;
-	ret_addr = addr; /* saving where the barrier jump is */
 	pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
 		__func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
 	ret = ftrace_modify_code(rec->ip, imm);
@@ -194,12 +191,9 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	ret = ftrace_modify_code(ip, upper);
 	ret += ftrace_modify_code(ip + 4, lower);
 
-	/* We just need to remove the rtsd r15, 8 by NOP */
-	BUG_ON(!ret_addr);
-	if (ret_addr)
-		ret += ftrace_modify_code(ret_addr, MICROBLAZE_NOP);
-	else
-		ret = 1; /* fault */
+	/* We just need to replace the rtsd r15, 8 with NOP */
+	ret += ftrace_modify_code((unsigned long)&ftrace_caller,
+				  MICROBLAZE_NOP);
 
 	/* All changes are done - lets do caches consistent */
 	flush_icache();
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index cb7815cfe5a..da6a5f5dc76 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -51,6 +51,12 @@ swapper_pg_dir:
 
 	.text
 ENTRY(_start)
+#if CONFIG_KERNEL_BASE_ADDR == 0
+	brai	TOPHYS(real_start)
+	.org	0x100
+real_start:
+#endif
+
 	mfs	r1, rmsr
 	andi	r1, r1, ~2
 	mts	rmsr, r1
@@ -99,8 +105,8 @@ no_fdt_arg:
 	tophys(r4,r4)			/* convert to phys address */
 	ori	r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
-	lbu	r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
-	sb	r2, r4, r6 /* addr[r4+r6]= r7*/
+	lbu	r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
+	sb	r2, r4, r6 /* addr[r4+r6]= r2*/
 	addik	r6, r6, 1 /* increment counting */
 	bgtid	r3, _copy_command_line /* loop for all entries */
 	addik	r3, r3, -1 /* descrement loop */
@@ -128,7 +134,7 @@ _copy_bram:
 	 * virtual to physical.
 	 */
 	nop
-	addik	r3, r0, 63 /* Invalidate all TLB entries */
+	addik	r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
 _invalidate:
 	mts	rtlbx, r3
 	mts	rtlbhi, r0 /* flush: ensure V is clear */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 2b86c03aa84..995a2123635 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -313,13 +313,13 @@ _hw_exception_handler:
 	mfs	r5, rmsr;
 	nop
 	swi	r5, r1, 0;
-	mfs	r3, resr
+	mfs	r4, resr
 	nop
-	mfs	r4, rear;
+	mfs	r3, rear;
 	nop
 
 #ifndef CONFIG_MMU
-	andi	r5, r3, 0x1000;		/* Check ESR[DS] */
+	andi	r5, r4, 0x1000;		/* Check ESR[DS] */
 	beqi	r5, not_in_delay_slot;	/* Branch if ESR[DS] not set */
 	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	nop
@@ -327,13 +327,14 @@ not_in_delay_slot:
 	swi	r17, r1, PT_R17
 #endif
 
-	andi	r5, r3, 0x1F;		/* Extract ESR[EXC] */
+	andi	r5, r4, 0x1F;		/* Extract ESR[EXC] */
 
 #ifdef CONFIG_MMU
 	/* Calculate exception vector offset = r5 << 2 */
 	addk	r6, r5, r5; /* << 1 */
 	addk	r6, r6, r6; /* << 2 */
 
+#ifdef DEBUG
 	/* counting which exception happen */
 	lwi	r5, r0, 0x200 + TOPHYS(r0_ram)
 	addi	r5, r5, 1
@@ -341,6 +342,7 @@ not_in_delay_slot:
 	lwi	r5, r6, 0x200 + TOPHYS(r0_ram)
 	addi	r5, r5, 1
 	swi	r5, r6, 0x200 + TOPHYS(r0_ram)
+#endif
 	/* end */
 	/* Load the HW Exception vector */
 	lwi	r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
 	swi	r18, r1, PT_R18
 
 	or	r5, r1, r0
-	andi	r6, r3, 0x1F; /* Load ESR[EC] */
+	andi	r6, r4, 0x1F; /* Load ESR[EC] */
 	lwi	r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
 	swi	r7, r1, PT_MODE
 	mfs	r7, rfsr
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
  */
 handle_unaligned_ex:
 	/* Working registers already saved: R3, R4, R5, R6
-	 * R3 = ESR
-	 * R4 = EAR
+	 * R4 = ESR
+	 * R3 = EAR
 	 */
 #ifdef CONFIG_MMU
-	andi	r6, r3, 0x1000			/* Check ESR[DS] */
+	andi	r6, r4, 0x1000			/* Check ESR[DS] */
 	beqi	r6, _no_delayslot	/* Branch if ESR[DS] not set */
 	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	nop
@@ -439,7 +441,7 @@ _no_delayslot:
 	RESTORE_STATE;
 	bri	unaligned_data_trap
 #endif
-	andi	r6, r3, 0x3E0; /* Mask and extract the register operand */
+	andi	r6, r4, 0x3E0; /* Mask and extract the register operand */
 	srl	r6, r6; /* r6 >> 5 */
 	srl	r6, r6;
 	srl	r6, r6;
@@ -448,33 +450,33 @@ _no_delayslot:
 	/* Store the register operand in a temporary location */
 	sbi	r6, r0, TOPHYS(ex_reg_op);
 
-	andi	r6, r3, 0x400; /* Extract ESR[S] */
+	andi	r6, r4, 0x400; /* Extract ESR[S] */
 	bnei	r6, ex_sw;
 ex_lw:
-	andi	r6, r3, 0x800; /* Extract ESR[W] */
+	andi	r6, r4, 0x800; /* Extract ESR[W] */
 	beqi	r6, ex_lhw;
-	lbui	r5, r4, 0; /* Exception address in r4 */
+	lbui	r5, r3, 0; /* Exception address in r3 */
 	/* Load a word, byte-by-byte from destination address
 		and save it in tmp space */
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
-	lbui	r5, r4, 1;
+	lbui	r5, r3, 1;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
-	lbui	r5, r4, 2;
+	lbui	r5, r3, 2;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_2);
-	lbui	r5, r4, 3;
+	lbui	r5, r3, 3;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_3);
-	/* Get the destination register value into r3 */
-	lwi	r3, r0, TOPHYS(ex_tmp_data_loc_0);
+	/* Get the destination register value into r4 */
+	lwi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
 	bri	ex_lw_tail;
ex_lhw:
-	lbui	r5, r4, 0; /* Exception address in r4 */
+	lbui	r5, r3, 0; /* Exception address in r3 */
 	/* Load a half-word, byte-by-byte from destination
		address and save it in tmp space */
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
-	lbui	r5, r4, 1;
+	lbui	r5, r3, 1;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
-	/* Get the destination register value into r3 */
-	lhui	r3, r0, TOPHYS(ex_tmp_data_loc_0);
+	/* Get the destination register value into r4 */
+	lhui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
 ex_lw_tail:
 	/* Get the destination register number into r5 */
 	lbui	r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@ ex_sw_tail:
 	andi	r6, r6, 0x800; /* Extract ESR[W] */
 	beqi	r6, ex_shw;
 	/* Get the word - delay slot */
-	swi	r3, r0, TOPHYS(ex_tmp_data_loc_0);
+	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
 	/* Store the word, byte-by-byte into destination address */
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_0);
-	sbi	r3, r4, 0;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_1);
-	sbi	r3, r4, 1;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_2);
-	sbi	r3, r4, 2;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_3);
-	sbi	r3, r4, 3;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
+	sbi	r4, r3, 0;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_1);
+	sbi	r4, r3, 1;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
+	sbi	r4, r3, 2;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
+	sbi	r4, r3, 3;
 	bri	ex_handler_done;
 
ex_shw:
 	/* Store the lower half-word, byte-by-byte into destination address */
-	swi	r3, r0, TOPHYS(ex_tmp_data_loc_0);
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_2);
-	sbi	r3, r4, 0;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_3);
-	sbi	r3, r4, 1;
+	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
+	sbi	r4, r3, 0;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
+	sbi	r4, r3, 1;
ex_sw_end: /* Exception handling of store word, ends. */

ex_handler_done:
@@ -560,21 +562,16 @@ ex_handler_done:
 	 */
 	mfs	r11, rpid
 	nop
-	bri	4
-	mfs	r3, rear		/* Get faulting address */
-	nop
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	ori	r4, r0, CONFIG_KERNEL_START
-	cmpu	r4, r3, r4
-	bgti	r4, ex3
+	ori	r5, r0, CONFIG_KERNEL_START
+	cmpu	r5, r3, r5
+	bgti	r5, ex3
 
 	/* First, check if it was a zone fault (which means a user
 	 * tried to access a kernel or read-protected page - always
 	 * a SEGV). All other faults here must be stores, so no
 	 * need to check ESR_S as well.
 	 */
-	mfs	r4, resr
-	nop
 	andi	r4, r4, 0x800		/* ESR_Z - zone protection */
 	bnei	r4, ex2
 
@@ -589,8 +586,6 @@ ex_handler_done:
 	 * tried to access a kernel or read-protected page - always
 	 * a SEGV). All other faults here must be stores, so no
 	 * need to check ESR_S as well. */
-	mfs	r4, resr
-	nop
 	andi	r4, r4, 0x800		/* ESR_Z */
 	bnei	r4, ex2
 	/* get current task address */
@@ -665,8 +660,6 @@ ex_handler_done:
 	 * R3 = ESR
 	 */
 
-	mfs	r3, rear		/* Get faulting address */
-	nop
 	RESTORE_STATE;
 	bri	page_fault_instr_trap
 
@@ -677,18 +670,15 @@ ex_handler_done:
 	 */
 	handle_data_tlb_miss_exception:
 	/* Working registers already saved: R3, R4, R5, R6
-	 * R3 = ESR
+	 * R3 = EAR, R4 = ESR
 	 */
 	mfs	r11, rpid
 	nop
-	bri	4
-	mfs	r3, rear		/* Get faulting address */
-	nop
 
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables. */
-	ori	r4, r0, CONFIG_KERNEL_START
-	cmpu	r4, r3, r4
+	ori	r6, r0, CONFIG_KERNEL_START
+	cmpu	r4, r3, r6
 	bgti	r4, ex5
 	ori	r4, r0, swapper_pg_dir
 	mts	rpid, r0		/* TLB will have 0 TID */
@@ -731,9 +721,8 @@ ex_handler_done:
 	 * Many of these bits are software only. Bits we don't set
 	 * here we (properly should) assume have the appropriate value.
 	 */
+	brid	finish_tlb_load
 	andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
-
-	bri	finish_tlb_load
 ex7:
 	/* The bailout. Restore registers to pre-exception conditions
 	 * and call the heavyweights to help us out.
@@ -754,9 +743,6 @@ ex_handler_done:
 	 */
 	mfs	r11, rpid
 	nop
-	bri	4
-	mfs	r3, rear		/* Get faulting address */
-	nop
 
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
@@ -792,7 +778,7 @@ ex_handler_done:
 	lwi	r4, r5, 0		/* Get Linux PTE */
 
 	andi	r6, r4, _PAGE_PRESENT
-	beqi	r6, ex7
+	beqi	r6, ex10
 
 	ori	r4, r4, _PAGE_ACCESSED
 	swi	r4, r5, 0
@@ -805,9 +791,8 @@ ex_handler_done:
 	 * Many of these bits are software only. Bits we don't set
 	 * here we (properly should) assume have the appropriate value.
 	 */
+	brid	finish_tlb_load
 	andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
-
-	bri	finish_tlb_load
 ex10:
 	/* The bailout. Restore registers to pre-exception conditions
 	 * and call the heavyweights to help us out.
@@ -837,9 +822,9 @@ ex_handler_done:
 	andi	r5, r5, (MICROBLAZE_TLB_SIZE-1)
 	ori	r6, r0, 1
 	cmp	r31, r5, r6
-	blti	r31, sem
+	blti	r31, ex12
 	addik	r5, r6, 1
- sem:
+ ex12:
 	/* MS: save back current TLB index */
 	swi	r5, r0, TOPHYS(tlb_index)
 
@@ -859,7 +844,6 @@ ex_handler_done:
 	nop
 
 	/* Done...restore registers and get out of here. */
- ex12:
 	mts	rpid, r11
 	nop
 	bri	4
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index df16c6287a8..7cf86498326 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -26,9 +26,10 @@
  * We avoid flushing the pinned 0, 1 and possibly 2 entries.
  */
 .globl _tlbia;
+.type  _tlbia, @function
 .align 4;
 _tlbia:
-	addik	r12, r0, 63 /* flush all entries (63 - 3) */
+	addik	r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
 	/* isync */
 _tlbia_1:
 	mts	rtlbx, r12
@@ -41,11 +42,13 @@ _tlbia_1:
 	/* sync */
 	rtsd	r15, 8
 	nop
+	.size  _tlbia, . - _tlbia
 
 /*
  * Flush MMU TLB for a particular address (in r5)
 */
 .globl _tlbie;
+.type  _tlbie, @function
 .align 4;
 _tlbie:
 	mts	rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@ _tlbie_1:
 	rtsd	r15, 8
 	nop
 
+	.size  _tlbie, . - _tlbie
+
 /*
  * Allocate TLB entry for early console
  */
 .globl early_console_reg_tlb_alloc;
+.type  early_console_reg_tlb_alloc, @function
 .align 4;
 early_console_reg_tlb_alloc:
 	/*
 	 * Load a TLB entry for the UART, so that microblaze_progress() can use
 	 * the UARTs nice and early. We use a 4k real==virtual mapping.
 	 */
-	ori	r4, r0, 63
+	ori	r4, r0, MICROBLAZE_TLB_SIZE - 1
 	mts	rtlbx, r4 /* TLB slot 2 */
 
 	or	r4,r5,r0
@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
 	rtsd	r15, 8
 	nop
 
+	.size  early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
+
 /*
  * Copy a whole page (4096 bytes).
  */
@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
 #define DCACHE_LINE_BYTES (4 * 4)
 
 .globl copy_page;
+.type  copy_page, @function
 .align 4;
 copy_page:
 	ori	r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@@ -118,3 +127,5 @@ _copy_page_loop:
 	addik	r11, r11, -1
 	rtsd	r15, 8
 	nop
+
+	.size  copy_page, . - copy_page
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 5a45b1adfef..cbecf110dc3 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -12,7 +12,6 @@
 #include <linux/kernel.h>
 #include <linux/elf.h>
 #include <linux/vmalloc.h>
-#include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/string.h>
 
diff --git a/arch/microblaze/kernel/of_platform.c b/arch/microblaze/kernel/of_platform.c
index 1c6d684996d..0dc755286d3 100644
--- a/arch/microblaze/kernel/of_platform.c
+++ b/arch/microblaze/kernel/of_platform.c
@@ -17,7 +17,6 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
-#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 812f1bf06c9..09bed44dfcd 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
 #include <linux/bitops.h>
 #include <asm/system.h>
 #include <asm/pgalloc.h>
+#include <asm/uaccess.h> /* for USER_DS macros */
 #include <asm/cacheflush.h>
 
 void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);
 
 void default_idle(void)
 {
-	if (!hlt_counter) {
+	if (likely(hlt_counter)) {
+		while (!need_resched())
+			cpu_relax();
+	} else {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
 		local_irq_disable();
@@ -82,9 +86,7 @@ void default_idle(void)
 			cpu_sleep();
 		local_irq_enable();
 		set_thread_flag(TIF_POLLING_NRFLAG);
-	} else
-		while (!need_resched())
-			cpu_relax();
+	}
 }
 
 void cpu_idle(void)
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index 6d6349a145f..a4a7770c614 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -75,7 +75,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	int rval;
 	unsigned long val = 0;
-	unsigned long copied;
 
 	switch (request) {
 	/* Read/write the word at location ADDR in the registers. */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index f974ec7aa35..17c98dbcec8 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
 }
 #endif	/* CONFIG_MTD_UCLINUX_EBSS */
 
+#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
+#define eprintk early_printk
+#else
+#define eprintk printk
+#endif
+
 void __init machine_early_init(const char *cmdline, unsigned int ram,
 		unsigned int fdt, unsigned int msr)
 {
@@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
 	setup_early_printk(NULL);
 #endif
 
-	early_printk("Ramdisk addr 0x%08x, ", ram);
+	eprintk("Ramdisk addr 0x%08x, ", ram);
 	if (fdt)
-		early_printk("FDT at 0x%08x\n", fdt);
+		eprintk("FDT at 0x%08x\n", fdt);
 	else
-		early_printk("Compiled-in FDT at 0x%08x\n",
+		eprintk("Compiled-in FDT at 0x%08x\n",
 					(unsigned int)_fdt_start);
 
 #ifdef CONFIG_MTD_UCLINUX
-	early_printk("Found romfs @ 0x%08x (0x%08x)\n",
+	eprintk("Found romfs @ 0x%08x (0x%08x)\n",
 			romfs_base, romfs_size);
-	early_printk("#### klimit %p ####\n", old_klimit);
+	eprintk("#### klimit %p ####\n", old_klimit);
 	BUG_ON(romfs_size < 0); /* What else can we do? */
 
-	early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+	eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
 			romfs_size, romfs_base, (unsigned)&_ebss);
 
-	early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
+	eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
 #endif
 
 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 	if (msr)
-		early_printk("!!!Your kernel has setup MSR instruction but "
+		eprintk("!!!Your kernel has setup MSR instruction but "
 				"CPU don't have it %d\n", msr);
 #else
 	if (!msr)
-		early_printk("!!!Your kernel not setup MSR instruction but "
+		eprintk("!!!Your kernel not setup MSR instruction but "
 				"CPU have it %d\n", msr);
 #endif
 
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 9f3c205fb75..f4e00b7f125 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -30,6 +30,7 @@
 #include <linux/semaphore.h>
 #include <linux/uaccess.h>
 #include <linux/unistd.h>
+#include <linux/slab.h>
 
 #include <asm/syscalls.h>
 
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index eaaaf805f31..5e4570ef515 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,13 +22,11 @@ void trap_init(void)
 	__enable_hw_exceptions();
 }
 
-static int kstack_depth_to_print = 24;
+static unsigned long kstack_depth_to_print = 24;
 
 static int __init kstack_setup(char *s)
 {
-	kstack_depth_to_print = strict_strtoul(s, 0, NULL);
-
-	return 1;
+	return !strict_strtoul(s, 0, &kstack_depth_to_print);
 }
 __setup("kstack=", kstack_setup);
 
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index b579db068c0..4dfe47d3cd9 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -10,5 +10,4 @@ else
 lib-y += memcpy.o memmove.o
 endif
 
-lib-$(CONFIG_NO_MMU) += uaccess.o
-lib-$(CONFIG_MMU) += uaccess_old.o
+lib-y += uaccess_old.o
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index 02e3ab4eddf..fdc48bb065d 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -30,8 +30,9 @@
  */
 
 #include <linux/linkage.h>
-
+	.text
 	.globl	memcpy
+	.type  memcpy, @function
 	.ent	memcpy
 
 memcpy:
@@ -345,9 +346,11 @@ a_done:
 	rtsd	r15, 8
 	nop
 
+.size  memcpy, . - memcpy
 .end memcpy
 /*----------------------------------------------------------------------------*/
 	.globl	memmove
+	.type  memmove, @function
 	.ent	memmove
 
 memmove:
@@ -659,4 +662,5 @@ d_done:
 	rtsd	r15, 8
 	nop
 
+.size  memmove, . - memmove
 .end memmove
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index cc2108b6b26..014bac92bdf 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
 	const uint32_t *i_src;
 	uint32_t *i_dst;
 
-	if (c >= 4) {
+	if (likely(c >= 4)) {
 		unsigned  value, buf_hold;
 
 		/* Align the dstination to a word boundry. */
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index 4df851d41a2..ecfb663e1fc 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -33,22 +33,23 @@
 #ifdef __HAVE_ARCH_MEMSET
 void *memset(void *v_src, int c, __kernel_size_t n)
 {
-	char *src = v_src;
 #ifdef CONFIG_OPT_LIB_FUNCTION
 	uint32_t *i_src;
-	uint32_t w32;
+	uint32_t w32 = 0;
 #endif
 	/* Truncate c to 8 bits */
 	c = (c & 0xFF);
 
 #ifdef CONFIG_OPT_LIB_FUNCTION
-	/* Make a repeating word out of it */
-	w32 = c;
-	w32 |= w32 << 8;
-	w32 |= w32 << 16;
+	if (unlikely(c)) {
+		/* Make a repeating word out of it */
+		w32 = c;
+		w32 |= w32 << 8;
+		w32 |= w32 << 16;
+	}
 
-	if (n >= 4) {
+	if (likely(n >= 4)) {
 		/* Align the destination to a word boundary */
 		/* This is done in an endian independant manner */
 		switch ((unsigned) src & 3) {
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c
deleted file mode 100644
index a853fe089c4..00000000000
--- a/arch/microblaze/lib/uaccess.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/string.h>
-#include <asm/uaccess.h>
-
-#include <asm/bug.h>
-
-long strnlen_user(const char __user *src, long count)
-{
-	return strlen(src) + 1;
-}
-
-#define __do_strncpy_from_user(dst, src, count, res)			\
-	do {								\
-		char *tmp;						\
-		strncpy(dst, src, count);				\
-		for (tmp = dst; *tmp && count > 0; tmp++, count--)	\
-			;						\
-		res = (tmp - dst);					\
-	} while (0)
-
-long __strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-
-long strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-
-unsigned long __copy_tofrom_user(void __user *to,
-		const void __user *from, unsigned long size)
-{
-	memcpy(to, from, size);
-	return 0;
-}
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index 67f991c14b8..5810cec54a7 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -22,6 +22,7 @@
 
 	.text
 .globl __strncpy_user;
+.type  __strncpy_user, @function
 .align 4;
 __strncpy_user:
 
@@ -50,7 +51,7 @@ __strncpy_user:
 3:
 	rtsd	r15,8
 	nop
-
+	.size   __strncpy_user, . - __strncpy_user
 
 	.section	.fixup, "ax"
 	.align	2
@@ -72,6 +73,7 @@ __strncpy_user:
 
 	.text
 .globl __strnlen_user;
+.type  __strnlen_user, @function
 .align 4;
 __strnlen_user:
 	addik	r3,r6,0
@@ -90,7 +92,7 @@ __strnlen_user:
 3:
 	rtsd	r15,8
 	nop
-
+	.size   __strnlen_user, . - __strnlen_user
 
 	.section .fixup,"ax"
 4:
@@ -108,6 +110,7 @@ __strnlen_user:
  */
 	.text
 .globl __copy_tofrom_user;
+.type  __copy_tofrom_user, @function
 .align 4;
 __copy_tofrom_user:
 	/*
 	 * r5 - to
 	 * r6 - from
 	 * r7, r3 - count
 	 * r4 - tempval
 	 */
-	addik	r3,r7,0
-	beqi	r3,3f
-1:
-	lbu	r4,r6,r0
-	addik	r6,r6,1
-2:
-	sb	r4,r5,r0
-	addik	r3,r3,-1
-	bneid	r3,1b
-	addik	r5,r5,1		/* delay slot */
+	beqid	r7, 3f /* zero size is not likely */
+	andi	r3, r7, 0x3 /* filter add count */
+	bneid	r3, 4f /* if is odd value then byte copying */
+	or	r3, r5, r6 /* find if is any to/from unaligned */
+	andi	r3, r3, 0x3 /* mask unaligned */
+	bneid	r3, 1f /* it is unaligned -> then jump */
+	or	r3, r0, r0
+
+/* at least one 4 byte copy */
+5:	lw	r4, r6, r3
+6:	sw	r4, r5, r3
+	addik	r7, r7, -4
+	bneid	r7, 5b
+	addik	r3, r3, 4
+	addik	r3, r7, 0
+	rtsd	r15, 8
+	nop
+4:	or	r3, r0, r0
+1:	lbu	r4,r6,r3
+2:	sb	r4,r5,r3
+	addik	r7,r7,-1
+	bneid	r7,1b
+	addik	r3,r3,1		/* delay slot */
 3:
+	addik	r3,r7,0
 	rtsd	r15,8
 	nop
-
+	.size   __copy_tofrom_user, . - __copy_tofrom_user
 
 	.section	__ex_table,"a"
-	.word	1b,3b,2b,3b
+	.word	1b,3b,2b,3b,5b,3b,6b,3b
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index a9b443e3fb9..f956e24fe49 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -32,6 +32,7 @@
 #include <linux/highmem.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
 #include <linux/io.h>
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d9d249a66ff..7af87f4b2c2 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	regs->esr = error_code;
 
 	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (kernel_mode(regs) && (address >= TASK_SIZE)) {
+	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
 		printk(KERN_WARNING "kernel task_size exceed");
 		_exception(SIGSEGV, regs, code, address);
 	}
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 #endif /* CONFIG_KGDB */
 
-	if (in_atomic() || !mm) {
+	if (unlikely(in_atomic() || !mm)) {
 		if (kernel_mode(regs))
 			goto bad_area_nosemaphore;
 
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
 
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 
 	vma = find_vma(mm, address);
-	if (!vma)
+	if (unlikely(!vma))
 		goto bad_area;
 
 	if (vma->vm_start <= address)
 		goto good_area;
 
-	if (!(vma->vm_flags & VM_GROWSDOWN))
+	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
 		goto bad_area;
 
-	if (!is_write)
+	if (unlikely(!is_write))
 		goto bad_area;
 
 	/*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * before setting the user r1.  Thus we allow the stack to
 	 * expand to 1MB without further checks.
 	 */
-	if (address + 0x100000 < vma->vm_end) {
+	if (unlikely(address + 0x100000 < vma->vm_end)) {
 
 		/* get user regs even if this fault is in kernel mode */
 		struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ good_area:
 	code = SEGV_ACCERR;
 
 	/* a write */
-	if (is_write) {
-		if (!(vma->vm_flags & VM_WRITE))
+	if (unlikely(is_write)) {
+		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			goto bad_area;
 	/* a read */
 	} else {
 		/* protection fault */
-		if (error_code & 0x08000000)
+		if (unlikely(error_code & 0x08000000))
 			goto bad_area;
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
 			goto bad_area;
 	}
 
@@ -235,7 +235,7 @@ survive:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+	if (unlikely(fault & VM_FAULT_MAJOR))
 		current->maj_flt++;
 	else
 		current->min_flt++;
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 1608e2e1a44..f42c2dde8b1 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -15,6 +15,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/pfn.h>
+#include <linux/slab.h>
 #include <linux/swap.h>
 
 #include <asm/page.h>
@@ -165,7 +166,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)addr, 0xcc, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -208,14 +208,6 @@ void __init mem_init(void)
 }
 
 #ifndef CONFIG_MMU
-/* Check against bounds of physical memory */
-int ___range_ok(unsigned long addr, unsigned long size)
-{
-	return ((addr < memory_start) ||
-		((addr + size) > memory_end));
-}
-EXPORT_SYMBOL(___range_ok);
-
 int page_is_ram(unsigned long pfn)
 {
 	return __range_ok(pfn, 0);
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 63a6fd07c48..d31312cde6e 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 		err = 0;
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 				__pgprot(flags)));
-		if (mem_init_done)
+		if (unlikely(mem_init_done))
 			flush_HPTE(0, va, pmd_val(*pd));
 			/* flush_HPTE(0, va, pg); */
 	}
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 0be34350d73..740bb32ec57 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -26,6 +26,7 @@
 #include <linux/syscalls.h>
 #include <linux/irq.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c
index 7e0c94f501c..3c3d808d7ce 100644
--- a/arch/microblaze/pci/pci_32.c
+++ b/arch/microblaze/pci/pci_32.c
@@ -14,6 +14,7 @@
 #include <linux/irq.h>
 #include <linux/list.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>