| author | Ingo Molnar <mingo@elte.hu> | 2009-10-25 17:30:53 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-10-25 17:30:53 +0100 |
| commit | 0b9e31e9264f1bad89856afb96da1688292f13b4 (patch) | |
| tree | 7a9e9b6456dce993efeed8734de0a15a1f16ae94 /arch/m32r | |
| parent | cf82ff7ea7695b0e82ba07bc5e9f1bd03a74e1aa (diff) | |
| parent | 964fe080d94db82a3268443e9b9ece4c60246414 (diff) | |
Merge branch 'linus' into sched/core
Conflicts:
fs/proc/array.c
Merge reason: resolve conflict and queue up dependent patch.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
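The message above records a routine conflict resolution. A merge of this shape is normally produced with the standard git workflow sketched below; the branch names are taken from the subject line and the conflicted path from the list above, so treat this as an illustrative sketch rather than a record of the exact commands used:

```sh
# Merge Linus' tree into the scheduler development branch (illustrative).
git checkout sched/core
git merge linus                 # stops with a conflict in fs/proc/array.c

# Resolve the conflict by hand, then record the resolution.
$EDITOR fs/proc/array.c
git add fs/proc/array.c
git commit                      # emits a merge commit (here 0b9e31e9264f)
```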
Diffstat (limited to 'arch/m32r')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/m32r/Kconfig | 6 |
| -rw-r--r-- | arch/m32r/boot/compressed/install.sh | 4 |
| -rw-r--r-- | arch/m32r/include/asm/hardirq.h | 15 |
| -rw-r--r-- | arch/m32r/include/asm/io.h | 7 |
| -rw-r--r-- | arch/m32r/include/asm/mman.h | 18 |
| -rw-r--r-- | arch/m32r/include/asm/mmu_context.h | 4 |
| -rw-r--r-- | arch/m32r/include/asm/page.h | 4 |
| -rw-r--r-- | arch/m32r/include/asm/processor.h | 2 |
| -rw-r--r-- | arch/m32r/include/asm/smp.h | 2 |
| -rw-r--r-- | arch/m32r/include/asm/thread_info.h | 15 |
| -rw-r--r-- | arch/m32r/kernel/entry.S | 7 |
| -rw-r--r-- | arch/m32r/kernel/head.S | 4 |
| -rw-r--r-- | arch/m32r/kernel/init_task.c | 5 |
| -rw-r--r-- | arch/m32r/kernel/m32r_ksyms.c | 6 |
| -rw-r--r-- | arch/m32r/kernel/ptrace.c | 5 |
| -rw-r--r-- | arch/m32r/kernel/smp.c | 31 |
| -rw-r--r-- | arch/m32r/kernel/smpboot.c | 4 |
| -rw-r--r-- | arch/m32r/kernel/time.c | 83 |
| -rw-r--r-- | arch/m32r/kernel/traps.c | 4 |
| -rw-r--r-- | arch/m32r/kernel/vmlinux.lds.S | 78 |
| -rw-r--r-- | arch/m32r/lib/delay.c | 4 |
| -rw-r--r-- | arch/m32r/mm/discontig.c | 5 |
| -rw-r--r-- | arch/m32r/mm/init.c | 2 |
| -rw-r--r-- | arch/m32r/mm/mmu.S | 12 |
24 files changed, 97 insertions, 230 deletions
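The diffstat above is this merge's change set restricted to arch/m32r, apparently taken against the first parent. With the kernel history available locally it can be regenerated from the commit and parent IDs listed in the header; the commands below are a sketch using those IDs:

```sh
# Diffstat of the merge vs. its first parent, limited to arch/m32r.
git diff --stat cf82ff7ea769 0b9e31e9264f -- arch/m32r

# Full patch text for the same path (the remainder of this page).
git diff cf82ff7ea769 0b9e31e9264f -- arch/m32r
```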
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index cabba332cc4..c41234f1b82 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -41,6 +41,12 @@ config HZ
 	int
 	default 100
 
+config GENERIC_TIME
+	def_bool y
+
+config ARCH_USES_GETTIMEOFFSET
+	def_bool y
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/m32r/boot/compressed/install.sh b/arch/m32r/boot/compressed/install.sh
index 6d72e9e7269..16e5a0a1343 100644
--- a/arch/m32r/boot/compressed/install.sh
+++ b/arch/m32r/boot/compressed/install.sh
@@ -24,8 +24,8 @@
 
 # User may have a custom install script
 
-if [ -x /sbin/installkernel ]; then
-  exec /sbin/installkernel "$@"
+if [ -x /sbin/${INSTALLKERNEL} ]; then
+  exec /sbin/${INSTALLKERNEL} "$@"
 fi
 
 if [ "$2" = "zImage" ]; then
diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h
index cb8aa762f23..4c31c0ae215 100644
--- a/arch/m32r/include/asm/hardirq.h
+++ b/arch/m32r/include/asm/hardirq.h
@@ -2,14 +2,7 @@
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#include <asm/irq.h>
 
 #if NR_IRQS > 256
 #define HARDIRQ_BITS	9
@@ -26,11 +19,7 @@ typedef struct {
 # error HARDIRQ_BITS is too low!
 #endif
 
-static inline void ack_bad_irq(int irq)
-{
-	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
-	BUG();
-}
+#include <asm-generic/hardirq.h>
 
 #endif /* __ASM_HARDIRQ_H */
 #endif /* __KERNEL__ */
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index d06933bd631..4010f1fc5b6 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -162,6 +162,13 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define __raw_writew writew
 #define __raw_writel writel
 
+#define ioread8 read
+#define ioread16 readw
+#define ioread32 readl
+#define iowrite8 writeb
+#define iowrite16 writew
+#define iowrite32 writel
+
 #define mmiowb()
 
 #define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
diff --git a/arch/m32r/include/asm/mman.h b/arch/m32r/include/asm/mman.h
index 04a5f40aa40..8eebf89f5ab 100644
--- a/arch/m32r/include/asm/mman.h
+++ b/arch/m32r/include/asm/mman.h
@@ -1,17 +1 @@
-#ifndef __M32R_MMAN_H__
-#define __M32R_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
-#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
-#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
-#define MAP_LOCKED	0x2000		/* pages are locked */
-#define MAP_NORESERVE	0x4000		/* don't check for reservations */
-#define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
-#define MAP_NONBLOCK	0x10000		/* do not block on IO */
-
-#define MCL_CURRENT	1		/* lock all current mappings */
-#define MCL_FUTURE	2		/* lock all future mappings */
-
-#endif /* __M32R_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d..a70a3df3363 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
 
 	if (prev != next) {
 #ifdef CONFIG_SMP
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 #endif /* CONFIG_SMP */
 		/* Set MPTB = next->pgd */
 		*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
 			activate_context(next);
 #endif /* CONFIG_SMP */
 }
diff --git a/arch/m32r/include/asm/page.h b/arch/m32r/include/asm/page.h
index 11777f7a562..725ede8f288 100644
--- a/arch/m32r/include/asm/page.h
+++ b/arch/m32r/include/asm/page.h
@@ -1,9 +1,11 @@
 #ifndef _ASM_M32R_PAGE_H
 #define _ASM_M32R_PAGE_H
 
+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT	12
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
 #ifndef __ASSEMBLY__
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 1a997fc148a..8397c249989 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -140,8 +140,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk)  ((tsk)->thread.lr)
 #define KSTK_ESP(tsk)  ((tsk)->thread.sp)
 
-#define THREAD_SIZE (2*PAGE_SIZE)
-
 #define cpu_relax()	barrier()
 
 #endif /* _ASM_M32R_PROCESSOR_H */
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index b96a6d2ffbc..e67ded1aab9 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif	/* not __ASSEMBLY__ */
 
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 71578151a40..ed240b6e8e7 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -55,6 +55,8 @@ struct thread_info {
 
 #define PREEMPT_ACTIVE		0x10000000
 
+#define THREAD_SIZE (PAGE_SIZE << 1)
+
 /*
  * macros/functions for gaining access to the thread information structure
  */
@@ -76,8 +78,6 @@ struct thread_info {
 #define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
 
-#define THREAD_SIZE (2*PAGE_SIZE)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
@@ -125,17 +125,6 @@ static inline unsigned int get_thread_fault_code(void)
 	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
 }
 
-#else /* !__ASSEMBLY__ */
-
-#define THREAD_SIZE		8192
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg)	GET_THREAD_INFO reg
-	.macro GET_THREAD_INFO reg
-	ldi	\reg, #-THREAD_SIZE
-	and	\reg, sp
-	.endm
-
 #endif
 
 /*
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 612d35b082a..403869833b9 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -118,6 +118,13 @@
 #define resume_kernel		restore_all
 #endif
 
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg)	GET_THREAD_INFO reg
+	.macro GET_THREAD_INFO reg
+	ldi	\reg, #-THREAD_SIZE
+	and	\reg, sp
+	.endm
+
 ENTRY(ret_from_fork)
 	pop	r0
 	bl	schedule_tail
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
index 0a7194439eb..a46652dd83e 100644
--- a/arch/m32r/kernel/head.S
+++ b/arch/m32r/kernel/head.S
@@ -268,13 +268,13 @@ ENTRY(empty_zero_page)
 /*------------------------------------------------------------------------
  * Stack area
  */
-	.section .spi
+	.section .init.data, "aw"
 	ALIGN
 	.global spi_stack_top
 	.zero	1024
 spi_stack_top:
 
-	.section .spu
+	.section .init.data, "aw"
 	ALIGN
 	.global spu_stack_top
 	.zero	1024
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index fce57e5d3f9..6c42d5f8df5 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
 
 /*
  * Initial task structure.
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 22624b51d4d..700570747a9 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -23,12 +23,6 @@ EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
 
-/* Networking helper routines. */
-/* Delay loops */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-
 EXPORT_SYMBOL(strncpy_from_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(clear_user);
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98b8feb12ed..98682bba0ed 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -77,7 +77,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
@@ -139,8 +139,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || off < 0 ||
-	    off > sizeof(struct user) - 3)
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad..8a88f1f0a3e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -17,6 +17,7 @@
 
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -85,7 +86,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +114,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
 	WARN_ON(cpu_is_offline(cpu_id));
-	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*
@@ -168,7 +169,7 @@ void smp_flush_cache_all(void)
 	spin_lock(&flushcache_lock);
 	mask=cpus_addr(cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
 		mb();
@@ -264,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
@@ -273,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))
@@ -334,7 +335,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -424,7 +425,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
 	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
@@ -469,7 +470,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;
 
@@ -546,14 +547,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -729,7 +730,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 	cpumask = cpu_online_map;
 	cpu_clear(smp_processor_id(), cpumask);
 
-	send_IPI_mask(cpumask, ipi_num, try);
+	send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -752,7 +753,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
 	cpumask_t physid_mask, tmp;
 	int cpu_id, phys_id;
@@ -761,11 +762,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
 	if (num_cpus <= 1)	/* NO MP */
 		return;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
 	physid_mask = CPU_MASK_NONE;
-	for_each_cpu_mask(cpu_id, cpumask){
+	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
 			cpu_set(phys_id, physid_mask);
 	}
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2547d6c4a82..e034844cfc0 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 
 	show_mp_info(nr_cpu);
@@ -213,7 +213,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (!physid_isset(phys_id, phys_cpu_present_map))
 			continue;
 
-		if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
+		if (max_cpus <= cpucount + 1)
 			continue;
 
 		do_boot_cpu(phys_id);
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index cada3ba4b99..e7fee0f198d 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -33,6 +33,15 @@
 
 #include <asm/hw_irq.h>
 
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+
+#ifdef CONFIG_RTC_DRV_CMOS_MODULE
+EXPORT_SYMBOL(rtc_lock);
+#endif
+#endif /* pc-style 'CMOS' RTC support */
+
 #ifdef CONFIG_SMP
 extern void smp_local_timer_interrupt(void);
 #endif
@@ -48,7 +57,7 @@ extern void smp_local_timer_interrupt(void);
 
 static unsigned long latch;
 
-static unsigned long do_gettimeoffset(void)
+u32 arch_gettimeoffset(void)
 {
 	unsigned long  elapsed_time = 0;	/* [us] */
 
@@ -93,78 +102,9 @@ static unsigned long do_gettimeoffset(void)
 #error no chip configuration
 #endif
 
-	return elapsed_time;
-}
-
-/*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick = tick_usec - tickadj;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-
-		usec = do_gettimeoffset();
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0))
-			usec = min(usec, max_ntp_tick);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= do_gettimeoffset() * NSEC_PER_USEC;
-
-	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-
-	return 0;
+	return elapsed_time * 1000;
 }
 
-EXPORT_SYMBOL(do_settimeofday);
-
 /*
  * In order to set the CMOS clock precisely, set_rtc_mmss has to be
  * called 500 ms after the second nowtime has started, because when
@@ -192,6 +132,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 #ifndef CONFIG_SMP
 	profile_tick(CPU_PROFILING);
 #endif
+	/* XXX FIXME. Uh, the xtime_lock should be held here, no? */
 	do_timer(1);
 
 #ifndef CONFIG_SMP
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 03b14e55cd8..fbd109031df 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -104,8 +104,8 @@ static void set_eit_vector_entries(void)
 	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
 	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
 	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
-	eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
-	eit_vector[190] = 0;
+	eit_vector[189] = 0;	/* CPU_BOOT_IPI */
+	eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
 	eit_vector[191] = 0;
 #endif
 	_flush_cache_copyback_all();
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index de5e21cca6a..8ceb6181d80 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -4,6 +4,7 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 
 OUTPUT_ARCH(m32r)
 #if defined(__LITTLE_ENDIAN__)
@@ -40,83 +41,22 @@ SECTIONS
 #endif
   _etext = .;			/* End of text section */
 
-  . = ALIGN(16);		/* Exception table */
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
-
+  EXCEPTION_TABLE(16)
   RODATA
-
-  /* writeable */
-  .data : {			/* Data */
-	*(.spu)
-	*(.spi)
-	DATA_DATA
-	CONSTRUCTORS
-	}
-
-  . = ALIGN(4096);
-  __nosave_begin = .;
-  .data_nosave : { *(.data.nosave) }
-  . = ALIGN(4096);
-  __nosave_end = .;
-
-  . = ALIGN(32);
-  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
+  RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
   _edata = .;			/* End of data section */
 
-  . = ALIGN(8192);		/* init_task */
-  .data.init_task : { *(.data.init_task) }
-
   /* will be freed after init */
-  . = ALIGN(4096);		/* Init code and data */
+  . = ALIGN(PAGE_SIZE);		/* Init code and data */
   __init_begin = .;
-  .init.text : {
-	_sinittext = .;
-	INIT_TEXT
-	_einittext = .;
-  }
-  .init.data : { INIT_DATA }
-  . = ALIGN(16);
-  __setup_start = .;
-  .init.setup : { *(.init.setup) }
-  __setup_end = .;
-  __initcall_start = .;
-  .initcall.init : {
-	INITCALLS
-  }
-  __initcall_end = .;
-  __con_initcall_start = .;
-  .con_initcall.init : { *(.con_initcall.init) }
-  __con_initcall_end = .;
-  SECURITY_INIT
-  . = ALIGN(4);
-  __alt_instructions = .;
-  .altinstructions : { *(.altinstructions) }
-  __alt_instructions_end = .;
-  .altinstr_replacement : { *(.altinstr_replacement) }
-  /* .exit.text is discard at runtime, not link time, to deal with references
-     from .altinstructions and .eh_frame */
-  .exit.text : { EXIT_TEXT }
-  .exit.data : { EXIT_DATA }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-  . = ALIGN(4096);
-  __initramfs_start = .;
-  .init.ramfs : { *(.init.ramfs) }
-  __initramfs_end = .;
-#endif
-
-  PERCPU(4096)
-  . = ALIGN(4096);
+  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_DATA_SECTION(16)
+  PERCPU(PAGE_SIZE)
+  . = ALIGN(PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
 
-  __bss_start = .;		/* BSS */
-  .bss : { *(.bss) }
-  . = ALIGN(4);
-  __bss_stop = .;
+  BSS_SECTION(0, 0, 4)
 
   _end = . ;
 
diff --git a/arch/m32r/lib/delay.c b/arch/m32r/lib/delay.c
index ced549be80f..940f4837e42 100644
--- a/arch/m32r/lib/delay.c
+++ b/arch/m32r/lib/delay.c
@@ -122,4 +122,8 @@ void __ndelay(unsigned long nsecs)
 {
 	__const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
 }
+
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index b7a78ad429b..5d2858f6eed 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -32,6 +32,9 @@ typedef struct {
 } mem_prof_t;
 static mem_prof_t mem_prof[MAX_NUMNODES];
 
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
 static void __init mem_prof_init(void)
 {
 	unsigned long start_pfn, holes, free_pfn;
@@ -42,7 +45,7 @@ static void __init mem_prof_init(void)
 	/* Node#0 SDRAM */
 	mp = &mem_prof[0];
 	mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
-	mp->pages = PFN_DOWN(CONFIG_MEMORY_SIZE);
+	mp->pages = PFN_DOWN(memory_end - memory_start);
 	mp->holes = 0;
 	mp->free_pfn = PFN_UP(__pa(_end));
 
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 24d429f9358..9f581df3952 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -171,7 +171,7 @@ void __init mem_init(void)
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
 		"%dk reserved, %dk data, %dk init)\n",
-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+		nr_free_pages() << (PAGE_SHIFT-10),
 		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
 		reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/m32r/mm/mmu.S b/arch/m32r/mm/mmu.S
index 49a6d16a3d5..e9491a5ae82 100644
--- a/arch/m32r/mm/mmu.S
+++ b/arch/m32r/mm/mmu.S
@@ -150,9 +150,13 @@ ENTRY(tme_handler)
 
 	; pmd = pmd_offset(pgd, address);
 	ld	r3, @r3			; r3: pmd data
-	ldi	r2, #-4096
 	beqz	r3, 3f			; pmd_none(*pmd) ?
 
+	and3	r2, r3, #0xfff
+	add3	r2, r2, #-355		; _KERNPG_TABLE(=0x163)
+	bnez	r2, 3f			; pmd_bad(*pmd) ?
+	ldi	r2, #-4096
+
 	; pte = pte_offset(pmd, address);
 	and	r2, r3			; r2: pte base addr
 	srl3	r3, r0, #10
@@ -263,9 +267,9 @@ ENTRY(tme_handler)
 	ld	r1, @r3		; r1: pmd
 	beqz	r1, 3f		; pmd_none(*pmd) ?
 ;
-	and3	r1, r1, #0xeff
-	ldi	r4, #611		; _KERNPG_TABLE(=611)
-	bne	r1, r4, 3f	; !pmd_bad(*pmd) ?
+	and3	r1, r1, #0x3ff
+	ldi	r4, #0x163		; _KERNPG_TABLE(=0x163)
+	bne	r1, r4, 3f	; pmd_bad(*pmd) ?
 
 	.fillinsn
4: