From d14d751ff9234595639a16e53b3cf0c575946bde Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 4 Aug 2009 17:17:00 +0900
Subject: sh64: Kill off special clear_page() implementation.

This can use the now generic clear_page() implementation, which is backed
by the sh64 optimized memset routine. This also fixes up the case where
PAGE_SIZE != 4kB.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/sh_ksyms_64.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/sh/kernel/sh_ksyms_64.c')

diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index f5bd156ea50..f96c95c07c4 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(__get_user_asm_l);
 EXPORT_SYMBOL(__get_user_asm_q);
 EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__copy_user);
--
cgit v1.2.3-70-g09d2


From 94ecd224c940830e2f2724c3860eb7fb74c15d31 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sun, 16 Aug 2009 01:50:17 +0900
Subject: sh: Fix up the SH-5 build with caches enabled.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/system.h    |  14 +--
 arch/sh/include/asm/system_32.h |  10 ++
 arch/sh/include/asm/system_64.h |   5 +
 arch/sh/kernel/sh_ksyms_64.c    |   8 --
 arch/sh/mm/cache-sh5.c          | 249 ++++------------------------------------
 arch/sh/mm/flush-sh4.c          |  81 +++++--------
 6 files changed, 64 insertions(+), 303 deletions(-)

(limited to 'arch/sh/kernel/sh_ksyms_64.c')

diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index ab79e1f4fbe..bf7c4cbde37 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -14,18 +14,6 @@
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
-#define __icbi()				\
-{						\
-	unsigned long __addr;			\
-	__addr = 0xa8000000;			\
-	__asm__ __volatile__(			\
-		"icbi %0\n\t"			\
-		: /* no output */		\
-		: "m" (__m(__addr)));		\
-}
-#endif
-
 /*
  * A brief note on ctrl_barrier(), the control register write barrier.
  *
@@ -44,7 +32,7 @@
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
 #define wmb()		__asm__ __volatile__ ("synco": : :"memory")
-#define ctrl_barrier()	__icbi()
+#define ctrl_barrier()	__icbi(0xa8000000)
 #define read_barrier_depends()	do { } while(0)
 #else
 #define mb()		__asm__ __volatile__ ("": : :"memory")
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index d7299d69ff7..5ddd2359f3e 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -63,6 +63,16 @@ do {						\
 #define __restore_dsp(tsk)	do { } while (0)
 #endif
 
+#if defined(CONFIG_CPU_SH4A)
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
+#else
+#define __icbi(addr)	mb()
+#endif
+
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
+
 struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 218b54d9d66..8e4a03e7966 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -37,6 +37,11 @@ do {						\
 #define jump_to_uncached()	do { } while (0)
 #define back_to_cached()	do { } while (0)
 
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
+
 static inline reg_size_t register_align(void *val)
 {
	return (unsigned long long)(signed long long)(signed long)val;
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index f96c95c07c4..d008e17eb25 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -30,14 +30,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(kernel_thread);
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU)
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
-#ifndef CONFIG_CACHE_OFF
-EXPORT_SYMBOL(flush_dcache_page);
-#endif
-
 #ifdef CONFIG_VT
 EXPORT_SYMBOL(screen_info);
 #endif
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index a8f5142dc2c..819e0f9e8db 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -25,29 +25,6 @@ extern void __weak sh4__flush_region_init(void);
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
-void __init cpu_cache_init(void)
-{
-	/* Reserve a slot for dcache colouring in the DTLB */
-	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
-
-	sh4__flush_region_init();
-}
-
-void __init kmap_coherent_init(void)
-{
-	/* XXX ... */
-}
-
-void *kmap_coherent(struct page *page, unsigned long addr)
-{
-	/* XXX ... */
-	return NULL;
-}
-
-void kunmap_coherent(void)
-{
-}
-
 #ifdef CONFIG_DCACHE_DISABLED
 #define sh64_dcache_purge_all()	do { } while (0)
 #define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
@@ -233,52 +210,6 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
	}
 }
 
-/*
- * Invalidate a small range of user context I-cache, not necessarily page
- * (or even cache-line) aligned.
- *
- * Since this is used inside ptrace, the ASID in the mm context typically
- * won't match current_asid. We'll have to switch ASID to do this. For
- * safety, and given that the range will be small, do all this under cli.
- *
- * Note, there is a hazard that the ASID in mm->context is no longer
- * actually associated with mm, i.e. if the mm->context has started a new
- * cycle since mm was last active. However, this is just a performance
- * issue: all that happens is that we invalidate lines belonging to
- * another mm, so the owning process has to refill them when that mm goes
- * live again. mm itself can't have any cache entries because there will
- * have been a flush_cache_all when the new mm->context cycle started.
- */
-static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
-					     unsigned long start, int len)
-{
-	unsigned long long eaddr = start;
-	unsigned long long eaddr_end = start + len;
-	unsigned long current_asid, mm_asid;
-	unsigned long flags;
-	unsigned long long epage_start;
-
-	/*
-	 * Align to start of cache line. Otherwise, suppose len==8 and
-	 * start was at 32N+28 : the last 4 bytes wouldn't get invalidated.
-	 */
-	eaddr = L1_CACHE_ALIGN(start);
-	eaddr_end = start + len;
-
-	mm_asid = cpu_asid(smp_processor_id(), mm);
-	local_irq_save(flags);
-	current_asid = switch_and_save_asid(mm_asid);
-
-	epage_start = eaddr & PAGE_MASK;
-
-	while (eaddr < eaddr_end) {
-		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
-		eaddr += L1_CACHE_BYTES;
-	}
-	switch_and_save_asid(current_asid);
-	local_irq_restore(flags);
-}
-
 static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
 {
	/* The icbi instruction never raises ITLBMISS. i.e. if there's not a
@@ -564,7 +495,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
  * Invalidate the entire contents of both caches, after writing back to
  * memory any dirty data from the D-cache.
  */
-void flush_cache_all(void)
+static void sh5_flush_cache_all(void)
 {
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
 }
@@ -591,7 +522,7 @@ void flush_cache_all(void)
  * I-cache. This is similar to the lack of action needed in
  * flush_tlb_mm - see fault.c.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh5_flush_cache_mm(struct mm_struct *mm)
 {
	sh64_dcache_purge_all();
 }
@@ -603,8 +534,8 @@ void flush_cache_mm(struct mm_struct *mm)
  *
  * Note, 'end' is 1 byte beyond the end of the range to flush.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh5_flush_cache_range(struct vm_area_struct *vma,
+				  unsigned long start, unsigned long end)
 {
	struct mm_struct *mm = vma->vm_mm;
 
@@ -621,8 +552,8 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  *
  * Note, this is called with pte lock held.
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
-		      unsigned long pfn)
+static void sh5_flush_cache_page(struct vm_area_struct *vma,
+				 unsigned long eaddr, unsigned long pfn)
 {
	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 
@@ -630,7 +561,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
	sh64_icache_inv_user_page(vma, eaddr);
 }
 
-void flush_dcache_page(struct page *page)
+static void sh5_flush_dcache_page(struct page *page)
 {
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
@@ -644,39 +575,20 @@
  * mapping, therefore it's guaranteed that there no cache entries for
  * the range in cache sets of the wrong colour.
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh5_flush_icache_range(unsigned long start, unsigned long end)
 {
	__flush_purge_region((void *)start, end);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
 }
 
-/*
- * Flush the range of user (defined by vma->vm_mm) address space starting
- * at 'addr' for 'len' bytes from the cache. The range does not straddle
- * a page boundary, the unique physical page containing the range is
- * 'page'. This seems to be used mainly for invalidating an address
- * range following a poke into the program text through the ptrace() call
- * from another process (e.g. for BRK instruction insertion).
- */
-static void flush_icache_user_range(struct vm_area_struct *vma,
-			struct page *page, unsigned long addr, int len)
-{
-
-	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
-	mb();
-
-	if (vma->vm_flags & VM_EXEC)
-		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
-}
-
 /*
  * For the address range [start,end), write back the data from the
  * D-cache and invalidate the corresponding region of the I-cache for the
  * current process. Used to flush signal trampolines on the stack to
  * make them executable.
  */
-void flush_cache_sigtramp(unsigned long vaddr)
+static void sh5_flush_cache_sigtramp(unsigned long vaddr)
 {
	unsigned long end = vaddr + L1_CACHE_BYTES;
 
@@ -685,138 +597,19 @@ void flush_cache_sigtramp(unsigned long vaddr)
	sh64_icache_inv_current_user_range(vaddr, end);
 }
 
-#ifdef CONFIG_MMU
-/*
- * These *MUST* lie in an area of virtual address space that's otherwise
- * unused.
- */
-#define UNIQUE_EADDR_START 0xe0000000UL
-#define UNIQUE_EADDR_END 0xe8000000UL
-
-/*
- * Given a physical address paddr, and a user virtual address user_eaddr
- * which will eventually be mapped to it, create a one-off kernel-private
- * eaddr mapped to the same paddr. This is used for creating special
- * destination pages for copy_user_page and clear_user_page.
- */
-static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
-					    unsigned long paddr)
-{
-	static unsigned long current_pointer = UNIQUE_EADDR_START;
-	unsigned long coloured_pointer;
-
-	if (current_pointer == UNIQUE_EADDR_END) {
-		sh64_dcache_purge_all();
-		current_pointer = UNIQUE_EADDR_START;
-	}
-
-	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
-				(user_eaddr & CACHE_OC_SYN_MASK);
-	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
-
-	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
-
-	return coloured_pointer;
-}
-
-static void sh64_copy_user_page_coloured(void *to, void *from,
-					 unsigned long address)
-{
-	void *coloured_to;
-
-	/*
-	 * Discard any existing cache entries of the wrong colour. These are
-	 * present quite often, if the kernel has recently used the page
-	 * internally, then given it up, then it's been allocated to the user.
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	copy_page(from, coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-static void sh64_clear_user_page_coloured(void *to, unsigned long address)
-{
-	void *coloured_to;
-
-	/*
-	 * Discard any existing kernel-originated lines of the wrong
-	 * colour (as above)
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	clear_page(coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-/*
- * 'from' and 'to' are kernel virtual addresses (within the superpage
- * mapping of the physical RAM). 'address' is the user virtual address
- * where the copy 'to' will be mapped after. This allows a custom
- * mapping to be used to ensure that the new copy is placed in the
- * right cache sets for the user to see it without having to bounce it
- * out via memory. Note however : the call to flush_page_to_ram in
- * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
- * very important case!
- *
- * TBD : can we guarantee that on every call, any cache entries for
- * 'from' are in the same colour sets as 'address' also? i.e. is this
- * always used just to deal with COW? (I suspect not).
- *
- * There are two possibilities here for when the page 'from' was last accessed:
- * - by the kernel : this is OK, no purge required.
- * - by the/a user (e.g. for break_COW) : need to purge.
- *
- * If the potential user mapping at 'address' is the same colour as
- * 'from' there is no need to purge any cache lines from the 'from'
- * page mapped into cache sets of colour 'address'. (The copy will be
- * accessing the page through 'from').
- */
-void copy_user_page(void *to, void *from, unsigned long address,
-		    struct page *page)
+void __init sh5_cache_init(void)
 {
-	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
-		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
+	flush_cache_all = sh5_flush_cache_all;
+	flush_cache_mm = sh5_flush_cache_mm;
+	flush_cache_dup_mm = sh5_flush_cache_mm;
+	flush_cache_page = sh5_flush_cache_page;
+	flush_cache_range = sh5_flush_cache_range;
+	flush_dcache_page = sh5_flush_dcache_page;
+	flush_icache_range = sh5_flush_icache_range;
+	flush_cache_sigtramp = sh5_flush_cache_sigtramp;
 
-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
-		copy_page(to, from);
-	else
-		sh64_copy_user_page_coloured(to, from, address);
-}
-
-/*
- * 'to' is a kernel virtual address (within the superpage mapping of the
- * physical RAM). 'address' is the user virtual address where the 'to'
- * page will be mapped after. This allows a custom mapping to be used to
- * ensure that the new copy is placed in the right cache sets for the
- * user to see it without having to bounce it out via memory.
- */
-void clear_user_page(void *to, unsigned long address, struct page *page)
-{
-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
-		clear_page(to);
-	else
-		sh64_clear_user_page_coloured(to, address);
-}
-
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
-		       unsigned long vaddr, void *dst, const void *src,
-		       unsigned long len)
-{
-	flush_cache_page(vma, vaddr, page_to_pfn(page));
-	memcpy(dst, src, len);
-	flush_icache_user_range(vma, page, vaddr, len);
-}
+	/* Reserve a slot for dcache colouring in the DTLB */
+	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
 
-void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
-			 unsigned long vaddr, void *dst, const void *src,
-			 unsigned long len)
-{
-	flush_cache_page(vma, vaddr, page_to_pfn(page));
-	memcpy(dst, src, len);
+	sh4__flush_region_init();
 }
-#endif
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index 99c50dc7551..cef402678f4 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -19,28 +19,19 @@ static void sh4__flush_wback_region(void *start, int size)
	cnt = (end - v) / L1_CACHE_BYTES;
 
	while (cnt >= 8) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}
 
	while (cnt) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
		cnt--;
	}
 }
@@ -62,27 +53,18 @@ static void sh4__flush_purge_region(void *start, int size)
	cnt = (end - v) / L1_CACHE_BYTES;
 
	while (cnt >= 8) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}
 
	while (cnt) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
		cnt--;
	}
 }
@@ -101,28 +83,19 @@ static void sh4__flush_invalidate_region(void *start, int size)
	cnt = (end - v) / L1_CACHE_BYTES;
 
	while (cnt >= 8) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}
 
	while (cnt) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
		cnt--;
	}
 }
--
cgit v1.2.3-70-g09d2


From 4c978ca3194a4002407a85b15122f793efc8616b Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 27 Oct 2009 11:51:19 +0900
Subject: sh: Clean up more superfluous symbol exports.

Many of these symbols went away completely, or we just never cared about
them in the first place. Trim the exports down to the essential set.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/process_32.c  |  2 ++
 arch/sh/kernel/process_64.c  |  2 ++
 arch/sh/kernel/sh_ksyms_32.c | 53 ++++++++++----------------------------------
 arch/sh/kernel/sh_ksyms_64.c | 10 ---------
 4 files changed, 16 insertions(+), 51 deletions(-)

(limited to 'arch/sh/kernel/sh_ksyms_64.c')

diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0673c4746be..a40342be32b 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -142,6 +142,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
	return pid;
 }
+EXPORT_SYMBOL(kernel_thread);
 
 /*
  * Free current thread data structures etc..
@@ -186,6 +187,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 
	return fpvalid;
 }
+EXPORT_SYMBOL(dump_fpu);
 
 asmlinkage void ret_from_fork(void);
 
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 1192398ef58..359b8a2f4d2 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -335,6 +335,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
+EXPORT_SYMBOL(kernel_thread);
 
 /*
  * Free current thread data structures etc..
@@ -417,6 +418,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
	return 0; /* Task didn't use the fpu at all. */
 #endif
 }
+EXPORT_SYMBOL(dump_fpu);
 
 asmlinkage void ret_from_fork(void);
 
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 396e47d076f..3896f26efa4 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -1,37 +1,11 @@
 #include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
+#include
+#include
+#include
+#include
 #include
-#include
-#include
-#include
-#include
-#include
-
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
-/* platform dependent support */
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(strlen);
-
-/* PCI exports */
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-#endif
+#include
 
-/* mem exports */
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
@@ -40,6 +14,13 @@ EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(_ebss);
+EXPORT_SYMBOL(empty_zero_page);
 
 #define DECLARE_EXPORT(name)	\
	extern void name(void);EXPORT_SYMBOL(name)
@@ -107,16 +88,6 @@ DECLARE_EXPORT(__sdivsi3_i4);
 DECLARE_EXPORT(__udivsi3_i4);
 DECLARE_EXPORT(__sdivsi3_i4i);
 DECLARE_EXPORT(__udivsi3_i4i);
-
 #ifdef CONFIG_MCOUNT
 DECLARE_EXPORT(mcount);
 #endif
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_generic);
-#ifdef CONFIG_IPV6
-EXPORT_SYMBOL(csum_ipv6_magic);
-#endif
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(_ebss);
-EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index d008e17eb25..45afa5c51f6 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -24,16 +24,6 @@
 #include
 #include
 
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
-/* platform dependent support */
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(kernel_thread);
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
 EXPORT_SYMBOL(__put_user_asm_b);
 EXPORT_SYMBOL(__put_user_asm_w);
 EXPORT_SYMBOL(__put_user_asm_l);
--
cgit v1.2.3-70-g09d2
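
A note on the second patch in this series: rather than exporting the SH-5
flush routines as global entry points, it makes them static sh5_*()
functions and has sh5_cache_init() hook them into generic function
pointers (flush_cache_all, flush_dcache_page, and so on). The fragment
below is a minimal, self-contained sketch of that dispatch pattern only;
it is not part of the patches above, and every name in it (flush_all,
noop_flush_all, sh5_cache_init_example) is invented for illustration --
the real pointer declarations live in the common SH cache code, which is
outside this diff.

/* Illustrative sketch only -- not taken from the kernel tree. */
#include <stdio.h>

/* CPU-specific handlers, kept static like the sh5_*() routines above. */
static void noop_flush_all(void)
{
	/* e.g. a caches-disabled configuration: nothing to do */
}

static void sh5_flush_all(void)
{
	puts("sh5: purge D-cache, invalidate I-cache");
}

/* Generic entry point, published as a function pointer with a safe default. */
void (*flush_all)(void) = noop_flush_all;

/* Analogous to sh5_cache_init(): point the generic hook at the SH-5 handler. */
static void sh5_cache_init_example(void)
{
	flush_all = sh5_flush_all;
}

int main(void)
{
	sh5_cache_init_example();
	flush_all();	/* dispatches to sh5_flush_all() */
	return 0;
}

Callers only ever see the generic pointer, so adding another CPU family
means adding one more init routine rather than touching every call site.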