From ea63a11027103372f56bd2d9ee3c525be6edc9c1 Mon Sep 17 00:00:00 2001
From: Lee Jones
Date: Wed, 8 May 2013 14:29:05 +0100
Subject: ARM: ux500: Fix incorrect DEBUG UART virtual addresses

A recent move to remove header files which were hindering multiplatform
support forced address allocations out of the headers and into the files
which were using them. We also lost some useful macros such as
IO_ADDRESS(), so physical -> virtual addressing has been carried out
manually in this case. Unfortunately, an incorrect value was used in the
conversion. This patch rectifies the error and ensures earlyprintk works
again.

Signed-off-by: Lee Jones
Signed-off-by: Linus Walleij
---
 arch/arm/include/debug/ux500.S | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/debug/ux500.S b/arch/arm/include/debug/ux500.S
index 2848857f5b6..fbd24beeb1f 100644
--- a/arch/arm/include/debug/ux500.S
+++ b/arch/arm/include/debug/ux500.S
@@ -24,9 +24,9 @@
 #define U8500_UART0_PHYS_BASE	(0x80120000)
 #define U8500_UART1_PHYS_BASE	(0x80121000)
 #define U8500_UART2_PHYS_BASE	(0x80007000)
-#define U8500_UART0_VIRT_BASE	(0xa8120000)
-#define U8500_UART1_VIRT_BASE	(0xa8121000)
-#define U8500_UART2_VIRT_BASE	(0xa8007000)
+#define U8500_UART0_VIRT_BASE	(0xf8120000)
+#define U8500_UART1_VIRT_BASE	(0xf8121000)
+#define U8500_UART2_VIRT_BASE	(0xf8007000)
 #define __UX500_PHYS_UART(n)	U8500_UART##n##_PHYS_BASE
 #define __UX500_VIRT_UART(n)	U8500_UART##n##_VIRT_BASE
 #endif
--
cgit v1.2.3-70-g09d2
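
As a sanity check on the new constants: all three corrected values are
consistent with one compound physical-to-virtual translation. The
stand-alone C snippet below verifies that; the formula itself is
reconstructed purely from the six addresses in the hunk above and is
only an assumption about what the lost IO_ADDRESS() macro computed.

	#include <assert.h>

	/* Hypothetical reconstruction of the removed phys -> virt
	 * translation; derived only from the values in the hunk above. */
	#define UX500_IO_ADDRESS(x) \
		(((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)

	int main(void)
	{
		assert(UX500_IO_ADDRESS(0x80120000u) == 0xf8120000u); /* UART0 */
		assert(UX500_IO_ADDRESS(0x80121000u) == 0xf8121000u); /* UART1 */
		assert(UX500_IO_ADDRESS(0x80007000u) == 0xf8007000u); /* UART2 */
		return 0;
	}

The old 0xa8xxxxxx values correspond to the same formula with a
0xa0000000 base, which is presumably how the wrong constants crept in.
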
From 509eb76ebf9771abc9fe51859382df2571f11447 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 5 Jun 2013 11:20:33 +0100
Subject: ARM: 7747/1: pcpu: ensure __my_cpu_offset cannot be re-ordered across barrier()

__my_cpu_offset is non-volatile, since we want its value to be cached
when we access several per-cpu variables in a row with preemption
disabled. This means that we rely on preempt_{en,dis}able to hazard
with the operation via the barrier() macro, so that we can't end up
migrating CPUs without reloading the per-cpu offset.

Unfortunately, GCC doesn't treat a "memory" clobber on a non-volatile
asm block as a side-effect, and will happily re-order it before other
memory clobbers (including those in preempt_disable()) and cache the
value. This has been observed to break the cmpxchg logic in the slub
allocator, leading to livelock in kmem_cache_alloc in mainline kernels.

This patch adds a dummy memory input operand to __my_cpu_offset,
forcing it to be ordered with respect to the barrier() macro.

Cc:
Cc: Rob Herring
Reviewed-by: Nicolas Pitre
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/percpu.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 968c0a14e0a..209e6504922 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
 static inline unsigned long __my_cpu_offset(void)
 {
 	unsigned long off;
-	/* Read TPIDRPRW */
-	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+	register unsigned long *sp asm ("sp");
+
+	/*
+	 * Read TPIDRPRW.
+	 * We want to allow caching the value, so avoid using volatile and
+	 * instead use a fake stack read to hazard against barrier().
+	 */
+	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
 	return off;
 }
 #define __my_cpu_offset __my_cpu_offset()
--
cgit v1.2.3-70-g09d2
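
The mechanism is easy to see outside the kernel. Below is a hedged
user-space sketch of the same idea using plain GCC inline asm;
per_cpu_offset_reg, my_offset_broken() and my_offset_fixed() are
invented stand-ins for TPIDRPRW and __my_cpu_offset, not kernel code.

	#define barrier() asm volatile("" ::: "memory")

	static unsigned long per_cpu_offset_reg = 42;	/* stand-in for TPIDRPRW */

	/* Broken pattern: non-volatile asm. The "memory" clobber alone is
	 * not a side-effect, so GCC may CSE/hoist this across barrier(). */
	static inline unsigned long my_offset_broken(void)
	{
		unsigned long off;
		asm("" : "=r" (off) : "0" (per_cpu_offset_reg) : "memory");
		return off;
	}

	/* Fixed pattern: a dummy memory input operand makes the asm consume
	 * memory, ordering it against barrier()'s memory clobber -- the
	 * same trick the patch applies with "Q" (*sp) on ARM. */
	static inline unsigned long my_offset_fixed(void)
	{
		unsigned long off;
		asm("" : "=r" (off) : "0" (per_cpu_offset_reg), "m" (per_cpu_offset_reg));
		return off;
	}

	int main(void)
	{
		unsigned long a = my_offset_fixed();
		barrier();	/* e.g. the barrier() inside preempt_enable() */
		unsigned long b = my_offset_fixed();	/* cannot be CSE'd with a */
		return (int)(a - b);
	}

The value may still be cached between two reads with no intervening
barrier(), which is exactly the behaviour the kernel wants to keep.
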
From 29eb77825cc7da8d45b642de2de3d423dc8a363f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 5 Jun 2013 12:26:50 +0200
Subject: arch, mm: Remove tlb_fast_mode()

Since the introduction of preemptible mmu_gather, TLB fast mode has been
broken. TLB fast mode relies on there being absolutely no concurrency;
it frees pages first and invalidates TLBs later. However, now we can get
concurrency and stuff goes *bang*.

This patch removes all tlb_fast_mode() code; it was found to be the
better option versus trying to patch the hole by entangling tlb
invalidation with the scheduler.

Cc: Thomas Gleixner
Cc: Russell King
Cc: Tony Luck
Reported-by: Max Filippov
Signed-off-by: Peter Zijlstra
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/tlb.h  | 27 ++++-----------------------
 arch/ia64/include/asm/tlb.h | 41 ++++++++---------------------------------
 include/asm-generic/tlb.h   | 17 +----------------
 mm/memory.c                 |  9 ---------
 4 files changed, 13 insertions(+), 81 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 99a19512ee2..bdf2b8458ec 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,18 +33,6 @@
 #include
 #include

-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)	0
-#else
-#define tlb_fast_mode(tlb)	1
-#endif
-
 #define MMU_GATHER_BUNDLE	8

 /*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-		if (tlb->pages == tlb->local)
-			__tlb_alloc_page(tlb);
-	}
+	free_pages_and_swap_cache(tlb->pages, tlb->nr);
+	tlb->nr = 0;
+	if (tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
 }

 static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	tlb->pages[tlb->nr++] = page;
 	VM_BUG_ON(tlb->nr > tlb->max);
 	return tlb->max - tlb->nr;
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index c3ffe3e54ed..ef3a9de0195 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -46,12 +46,6 @@
 #include
 #include

-#ifdef CONFIG_SMP
-# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
-#else
-# define tlb_fast_mode(tlb)	(1)
-#endif
-
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -60,7 +54,7 @@
 struct mmu_gather {
 	struct mm_struct	*mm;
-	unsigned int		nr;	/* == ~0U => fast mode */
+	unsigned int		nr;
 	unsigned int		max;
 	unsigned char		fullmm;	/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 static inline void
 ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
+	unsigned long i;
 	unsigned int nr;

 	if (!tlb->need_flush)
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e

 	/* lastly, release the freed pages */
 	nr = tlb->nr;
-	if (!tlb_fast_mode(tlb)) {
-		unsigned long i;
-		tlb->nr = 0;
-		tlb->start_addr = ~0UL;
-		for (i = 0; i < nr; ++i)
-			free_page_and_swap_cache(tlb->pages[i]);
-	}
+
+	tlb->nr = 0;
+	tlb->start_addr = ~0UL;
+	for (i = 0; i < nr; ++i)
+		free_page_and_swap_cache(tlb->pages[i]);
 }

 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
@@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
-	/*
-	 * Use fast mode if only 1 CPU is online.
-	 *
-	 * It would be tempting to turn on fast-mode for full_mm_flush as well. But this
-	 * doesn't work because of speculative accesses and software prefetching: the page
-	 * table of "mm" may (and usually is) the currently active page table and even
-	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
-	 * compiler might use speculation or lfetch.fault on what happens to be a valid
-	 * user-space address. This in turn could trigger a TLB miss fault (or a VHPT
-	 * walk) and re-insert a TLB entry we just removed. Slow mode avoids such
-	 * problems. (We could make fast-mode work by switching the current task to a
-	 * different "mm" during the shootdown.) --davidm 08/02/2002
-	 */
-	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+	tlb->nr = 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
 }
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;

-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	if (!tlb->nr && tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b1b1fa6ffff..13821c339a4 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -97,11 +97,9 @@ struct mmu_gather {
 	unsigned long		start;
 	unsigned long		end;
 	unsigned int		need_flush : 1,	/* Did free PTEs */
-				fast_mode  : 1; /* No batching   */
-
 	/* we are in the middle of an operation to clear
 	 * a full mm and can make some optimizations */
-	unsigned int		fullmm : 1,
+				fullmm : 1,
 	/* we have performed an operation which
 	 * requires a complete flush of the tlb */
 				need_flush_all : 1;
@@ -114,19 +112,6 @@ struct mmu_gather {

 #define HAVE_GENERIC_MMU_GATHER

-static inline int tlb_fast_mode(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_SMP
-	return tlb->fast_mode;
-#else
-	/*
-	 * For UP we don't need to worry about TLB flush
-	 * and page free order so much..
-	 */
-	return 1;
-#endif
-}
-
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
diff --git a/mm/memory.c b/mm/memory.c
index 6dc1882fbd7..61a262b08e5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -220,7 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->start	= -1UL;
 	tlb->end	= 0;
 	tlb->need_flush = 0;
-	tlb->fast_mode  = (num_possible_cpus() == 1);
 	tlb->local.next = NULL;
 	tlb->local.nr   = 0;
 	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
@@ -244,9 +243,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_table_flush(tlb);
 #endif

-	if (tlb_fast_mode(tlb))
-		return;
-
 	for (batch = &tlb->local; batch; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
 		batch->nr = 0;
@@ -288,11 +284,6 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)

 	VM_BUG_ON(!tlb->need_flush);

-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu() */
-	}
-
 	batch = tlb->active;
 	batch->pages[batch->nr++] = page;
 	if (batch->nr == batch->max) {
--
cgit v1.2.3-70-g09d2
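
After this change every configuration takes the batching path: gather
the pages, invalidate the TLB, and only then free. The self-contained
C model below illustrates that ordering; all names are illustrative and
none of this is the kernel's actual mmu_gather API.

	#include <stdlib.h>

	#define GATHER_BUNDLE 8

	struct gather_model {
		void *local[GATHER_BUNDLE];
		void **pages;
		unsigned int nr, max;
	};

	static void model_tlb_invalidate(void) { /* stand-in for tlb_flush() */ }

	static void model_flush(struct gather_model *tlb)
	{
		model_tlb_invalidate();		/* 1: invalidate TLB entries */
		for (unsigned int i = 0; i < tlb->nr; i++)
			free(tlb->pages[i]);	/* 2: only then free the pages */
		tlb->nr = 0;
	}

	/* Returns the space left; 0 tells the caller to flush now. There is
	 * deliberately no "fast path" that frees the page right here:
	 * freeing before the invalidation is exactly the race removed. */
	static unsigned int model_remove_page(struct gather_model *tlb, void *page)
	{
		tlb->pages[tlb->nr++] = page;
		return tlb->max - tlb->nr;
	}

	int main(void)
	{
		struct gather_model tlb = { .max = GATHER_BUNDLE };
		tlb.pages = tlb.local;
		if (!model_remove_page(&tlb, malloc(64)))
			model_flush(&tlb);	/* batch full */
		model_flush(&tlb);		/* final flush, as tlb_finish_mmu would do */
		return 0;
	}
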
From 1bc39742aab09248169ef9d3727c9def3528b3f3 Mon Sep 17 00:00:00 2001
From: Simon Baatz
Date: Mon, 10 Jun 2013 21:10:12 +0100
Subject: ARM: 7755/1: handle user space mapped pages in flush_kernel_dcache_page

Commit f8b63c1 made flush_kernel_dcache_page a no-op assuming that
the pages it needs to handle are kernel mapped only. However, for
example when doing direct I/O, pages with user space mappings may
occur.

Thus, continue to do lazy flushing if there are no user space
mappings. Otherwise, flush the kernel cache lines directly.

Signed-off-by: Simon Baatz
Reviewed-by: Catalin Marinas
Cc: # 3.2+
Signed-off-by: Russell King
---
 arch/arm/include/asm/cacheflush.h |  4 +---
 arch/arm/mm/flush.c               | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 3 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bff71388e72..17d0ae8672f 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }

 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);

 #define flush_dcache_mmap_lock(mapping) \
 	spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 0d473cce501..32aa5861119 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -300,6 +300,39 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);

+/*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation. Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+		struct address_space *mapping;
+
+		mapping = page_mapping(page);
+
+		if (!mapping || mapping_mapped(mapping)) {
+			void *addr;
+
+			addr = page_address(page);
+			/*
+			 * kmap_atomic() doesn't set the page virtual
+			 * address for highmem pages, and
+			 * kunmap_atomic() takes care of cache
+			 * flushing already.
+			 */
+			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+		}
+	}
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 /*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data. The expected sequence is:
--
cgit v1.2.3-70-g09d2
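
Callers follow the usual convention for flush_kernel_dcache_page():
write through the kernel mapping, flush, then unmap. A sketch of a
typical caller is below; fill_page_and_sync() and its parameters are
hypothetical, only the kmap/flush calls are real kernel API.

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper: write into a page that may also be mapped
	 * into user space (e.g. a direct I/O target), then make the
	 * kernel-side writes visible before the mapping goes away. */
	static void fill_page_and_sync(struct page *page, const void *src, size_t len)
	{
		void *dst = kmap_atomic(page);

		memcpy(dst, src, len);
		flush_kernel_dcache_page(page);	/* no longer a no-op on ARM */
		kunmap_atomic(dst);
	}
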
From 18d7f152df31e5a326301fdaad385e40874dff80 Mon Sep 17 00:00:00 2001
From: Lorenzo Pieralisi
Date: Wed, 19 Jun 2013 10:40:48 +0100
Subject: ARM: 7763/1: kernel: fix __cpu_logical_map default initialization

The __cpu_logical_map array is statically initialized to 0, which is a
valid MPIDR value. To prevent issues with the current implementation,
this patch defines an MPIDR_INVALID value, and statically initializes
the __cpu_logical_map[] array to it.

Entries in the arm_dt_init_cpu_maps() tmp_map array used to stash DT
reg properties while parsing DT are initialized with the MPIDR_INVALID
value as well for consistency.

Signed-off-by: Lorenzo Pieralisi
Acked-by: Nicolas Pitre
Cc: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/cputype.h  | 2 ++
 arch/arm/include/asm/smp_plat.h | 2 +-
 arch/arm/kernel/devtree.c       | 2 +-
 arch/arm/kernel/setup.c         | 2 +-
 4 files changed, 5 insertions(+), 3 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 7652712d1d1..dba62cb1ad0 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -32,6 +32,8 @@

 #define MPIDR_HWID_BITMASK 0xFFFFFF

+#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
+
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)

diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index aaa61b6f50f..e7898320273 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void)
 /*
  * Logical CPU mapping.
  */
-extern int __cpu_logical_map[];
+extern u32 __cpu_logical_map[];
 #define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
 /*
  * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 904cad5ec65..0905502bee1 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -82,7 +82,7 @@ void __init arm_dt_init_cpu_maps(void)
 	u32 i, j, cpuidx = 1;
 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;

-	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
+	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
 	bool bootcpu_valid = false;

 	cpus = of_find_node_by_path("/cpus");
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1522c7ae31b..b4b1d397592 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -444,7 +444,7 @@ void notrace cpu_init(void)
 	    : "r14");
 }

-int __cpu_logical_map[NR_CPUS];
+u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

 void __init smp_setup_processor_id(void)
 {
--
cgit v1.2.3-70-g09d2
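
The sentinel works because hardware IDs are masked to MPIDR[23:0]
before they are stored, while ~MPIDR_HWID_BITMASK has only the upper
bits set, so no masked value can ever collide with it — unlike 0, which
is a perfectly valid hwid (typically the boot CPU's). A stand-alone
check of that property:

	#include <assert.h>
	#include <stdint.h>

	#define MPIDR_HWID_BITMASK 0xFFFFFF
	#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)

	int main(void)
	{
		/* Any value entered into the map is first masked to
		 * MPIDR[23:0], e.g. an arbitrary cluster/CPU encoding: */
		uint32_t hwid = 0x81000100 & MPIDR_HWID_BITMASK;

		assert((hwid & ~MPIDR_HWID_BITMASK) == 0);	/* bits 24+ clear */
		assert(hwid != (uint32_t)MPIDR_INVALID);	/* never aliases */
		assert((0u & MPIDR_HWID_BITMASK) == 0u);	/* 0 is a valid hwid,
								   hence the old bug */
		return 0;
	}
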
From 3e0a07f8c401bb43e0f964c5f1285b2cb2028645 Mon Sep 17 00:00:00 2001
From: Gregory CLEMENT
Date: Sun, 23 Jun 2013 10:17:11 +0100
Subject: ARM: 7773/1: PJ4B: Add support for errata 4742

This commit fixes the regression on Armada 370 (the kernel hang during
boot) introduced by the commit: "ARM: 7691/1: mm: kill unused
TLB_CAN_READ_FROM_L1_CACHE and use ALT_SMP instead".

When coming out of either a Wait for Interrupt (WFI) or a Wait for
Event (WFE) IDLE state, a specific timing sensitivity exists between
the retiring WFI/WFE instructions and the newly issued subsequent
instructions. This sensitivity can result in a CPU hang scenario. The
workaround is to insert either a Data Synchronization Barrier (DSB) or
a Data Memory Barrier (DMB) command immediately after the WFI/WFE
instruction.

This commit is based on the work of Lior Amsalem, but heavily modified
to apply the errata fix dynamically according to the processor type,
thanks to the suggestions of Russell King and Nicolas Pitre.

Signed-off-by: Gregory CLEMENT
Reviewed-by: Will Deacon
Acked-by: Nicolas Pitre
Tested-by: Willy Tarreau
Cc:
Signed-off-by: Russell King
---
 arch/arm/Kconfig                 | 14 ++++++++++++++
 arch/arm/include/asm/glue-proc.h |  9 +++++++++
 arch/arm/mm/proc-macros.S        |  5 +++++
 arch/arm/mm/proc-v7.S            | 34 +++++++++++++++++++++++++++++++---
 4 files changed, 59 insertions(+), 3 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2651b1da1c5..136f263ed47 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1087,6 +1087,20 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif

+config PJ4B_ERRATA_4742
+	bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation"
+	depends on CPU_PJ4B && MACH_ARMADA_370
+	default y
+	help
+	  When coming out of either a Wait for Interrupt (WFI) or a Wait for
+	  Event (WFE) IDLE states, a specific timing sensitivity exists between
+	  the retiring WFI/WFE instructions and the newly issued subsequent
+	  instructions. This sensitivity can result in a CPU hang scenario.
+	  Workaround:
+	  The software must insert either a Data Synchronization Barrier (DSB)
+	  or Data Memory Barrier (DMB) command immediately after the WFI/WFE
+	  instruction
+
 config ARM_ERRATA_326103
 	bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
 	depends on CPU_V6
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index ac1dd54724b..8017e94acc5 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,15 @@
 # endif
 #endif

+#ifdef CONFIG_CPU_PJ4B
+# ifdef CPU_NAME
+#  undef MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_pj4b
+# endif
+#endif
+
 #ifndef MULTI_CPU
 #define cpu_proc_init			__glue(CPU_NAME,_proc_init)
 #define cpu_proc_fin			__glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f9a0aa725ea..e3c48a3fe06 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -333,3 +333,8 @@ ENTRY(\name\()_tlb_fns)
 	.endif
 	.size	\name\()_tlb_fns, . - \name\()_tlb_fns
 .endm
+
+.macro globl_equ x, y
+	.globl	\x
+	.equ	\x, \y
+.endm
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 4c8c9c10a38..e35fec34453 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -138,6 +138,29 @@ ENTRY(cpu_v7_do_resume)
 	mov	r0, r8			@ control register
 	b	cpu_resume_mmu
 ENDPROC(cpu_v7_do_resume)
+#endif
+
+#ifdef CONFIG_CPU_PJ4B
+	globl_equ	cpu_pj4b_switch_mm,	cpu_v7_switch_mm
+	globl_equ	cpu_pj4b_set_pte_ext,	cpu_v7_set_pte_ext
+	globl_equ	cpu_pj4b_proc_init,	cpu_v7_proc_init
+	globl_equ	cpu_pj4b_proc_fin,	cpu_v7_proc_fin
+	globl_equ	cpu_pj4b_reset,		cpu_v7_reset
+#ifdef CONFIG_PJ4B_ERRATA_4742
+ENTRY(cpu_pj4b_do_idle)
+	dsb					@ WFI may enter a low-power mode
+	wfi
+	dsb					@barrier
+	mov	pc, lr
+ENDPROC(cpu_pj4b_do_idle)
+#else
+	globl_equ	cpu_pj4b_do_idle,	cpu_v7_do_idle
+#endif
+	globl_equ	cpu_pj4b_dcache_clean_area,	cpu_v7_dcache_clean_area
+	globl_equ	cpu_pj4b_do_suspend,	cpu_v7_do_suspend
+	globl_equ	cpu_pj4b_do_resume,	cpu_v7_do_resume
+	globl_equ	cpu_pj4b_suspend_size,	cpu_v7_suspend_size
+
 #endif

 	__CPUINIT
@@ -350,6 +373,9 @@ __v7_setup_stack:

 	@ define struct processor (see and proc-macros.S)
 	define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#ifdef CONFIG_CPU_PJ4B
+	define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#endif

 	.section ".rodata"
@@ -362,7 +388,7 @@ __v7_setup_stack:
 /*
  * Standard v7 proc info content
  */
-.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
+.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
 	ALT_SMP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
 			PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
 	ALT_UP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
@@ -375,7 +401,7 @@ __v7_setup_stack:
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
 		HWCAP_EDSP | HWCAP_TLS | \hwcaps
 	.long	cpu_v7_name
-	.long	v7_processor_functions
+	.long	\proc_fns
 	.long	v7wbi_tlb_fns
 	.long	v6_user_fns
 	.long	v7_cache_fns
@@ -407,12 +433,14 @@ __v7_ca9mp_proc_info:
 /*
  * Marvell PJ4B processor.
  */
+#ifdef CONFIG_CPU_PJ4B
 	.type	__v7_pj4b_proc_info, #object
__v7_pj4b_proc_info:
 	.long	0x560f5800
 	.long	0xff0fff00
-	__v7_proc __v7_pj4b_setup
+	__v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
 	.size	__v7_pj4b_proc_info, . - __v7_pj4b_proc_info
+#endif

 /*
  * ARM Ltd. Cortex A7 processor.
--
cgit v1.2.3-70-g09d2
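
The heart of the workaround is the barrier placed directly behind the
wake-up instruction, as in cpu_pj4b_do_idle above; the globl_equ helper
added to proc-macros.S merely emits .globl plus .equ so the remaining
cpu_pj4b_* entry points alias their cpu_v7_* counterparts. For
reference, the same idle sequence expressed as C inline assembly — a
sketch of the pattern (ARM-only, invented function name), not kernel
API:

	/* Errata 4742 pattern: a DSB (or DMB) must immediately follow
	 * WFI/WFE so the core cannot hang coming out of IDLE. */
	static inline void pj4b_idle(void)
	{
		asm volatile("dsb\n\t"	/* drain before the wait */
			     "wfi\n\t"	/* enter IDLE */
			     "dsb"	/* errata 4742: barrier right after WFI */
			     ::: "memory");
	}
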