Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/cacheflush.h              |  23
-rw-r--r--  arch/arm/include/asm/dma-mapping.h             |  26
-rw-r--r--  arch/arm/include/asm/hardware/cache-tauros2.h  |  11
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h      | 165
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h         |  18
-rw-r--r--  arch/arm/include/asm/memory.h                  |  16
-rw-r--r--  arch/arm/include/asm/mman.h                    |   3
-rw-r--r--  arch/arm/include/asm/pgtable.h                 |  14
-rw-r--r--  arch/arm/include/asm/socket.h                  |   2
-rw-r--r--  arch/arm/include/asm/swab.h                    |  19
-rw-r--r--  arch/arm/include/asm/system.h                  |  19
11 files changed, 281 insertions(+), 35 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3d0cdd21b88..73eceb87e58 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -331,15 +331,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()	__cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +347,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
@@ -356,7 +356,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
 }
 
 static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long uaddr, void *kaddr,
 			 unsigned long len, int write)
 {
@@ -365,6 +365,16 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		__cpuc_coherent_kern_range(addr, addr + len);
 	}
 }
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+		vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+		vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+		vivt_flush_cache_page(vma,addr,pfn)
+#define flush_ptrace_access(vma,page,ua,ka,len,write) \
+		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -408,10 +418,9 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
  * about to change to user space. This is the same method as used on SPARC64.
  * See update_mmu_cache for the user space part.
  */
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
 static inline void __flush_icache_all(void)
 {
 #ifdef CONFIG_ARM_ERRATA_411920
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ff46dfa68a9..a96300bf83f 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -15,20 +15,15 @@
  * must not be used by drivers.
  */
 #ifndef __arch_page_to_dma
-
-#if !defined(CONFIG_HIGHMEM)
 static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
 {
-	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
 }
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
 {
-	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+	return pfn_to_page(__bus_to_pfn(addr));
 }
-#else
-#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
-#endif
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
@@ -45,6 +40,11 @@ static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
 	return __arch_page_to_dma(dev, page);
 }
 
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+{
+	return __arch_dma_to_page(dev, addr);
+}
+
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
 	return __arch_dma_to_virt(dev, addr);
@@ -257,9 +257,11 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
  */
 extern dma_addr_t dma_map_single(struct device *, void *, size_t,
 		enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+		enum dma_data_direction);
 extern dma_addr_t dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
 
 /*
@@ -352,7 +354,6 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 {
 	/* nothing to do */
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -371,8 +372,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, handle, size, dir);
+	/* nothing to do */
 }
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h
new file mode 100644
index 00000000000..538f17ca905
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-tauros2.h
@@ -0,0 +1,11 @@
+/*
+ * arch/arm/include/asm/hardware/cache-tauros2.h
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+extern void __init tauros2_init(void);
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
new file mode 100644
index 00000000000..f82b25d4f73
--- /dev/null
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -0,0 +1,165 @@
+/*
+ * linux/arch/arm/include/asm/hardware/coresight.h
+ *
+ * CoreSight components' registers
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ * Alexander Shishkin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_HARDWARE_CORESIGHT_H
+#define __ASM_HARDWARE_CORESIGHT_H
+
+#define TRACER_ACCESSED_BIT	0
+#define TRACER_RUNNING_BIT	1
+#define TRACER_CYCLE_ACC_BIT	2
+#define TRACER_ACCESSED		BIT(TRACER_ACCESSED_BIT)
+#define TRACER_RUNNING		BIT(TRACER_RUNNING_BIT)
+#define TRACER_CYCLE_ACC	BIT(TRACER_CYCLE_ACC_BIT)
+
+struct tracectx {
+	unsigned int	etb_bufsz;
+	void __iomem	*etb_regs;
+	void __iomem	*etm_regs;
+	unsigned long	flags;
+	int		ncmppairs;
+	int		etm_portsz;
+	struct device	*dev;
+	struct clk	*emu_clk;
+	struct mutex	mutex;
+};
+
+#define TRACER_TIMEOUT 10000
+
+#define etm_writel(t, v, x) \
+	(__raw_writel((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+
+/* CoreSight Management Registers */
+#define CSMR_LOCKACCESS 0xfb0
+#define CSMR_LOCKSTATUS 0xfb4
+#define CSMR_AUTHSTATUS 0xfb8
+#define CSMR_DEVID	0xfc8
+#define CSMR_DEVTYPE	0xfcc
+/* CoreSight Component Registers */
+#define CSCR_CLASS	0xff4
+
+#define CSCR_PRSR	0x314
+
+#define UNLOCK_MAGIC	0xc5acce55
+
+/* ETM control register, "ETM Architecture", 3.3.1 */
+#define ETMR_CTRL		0
+#define ETMCTRL_POWERDOWN	1
+#define ETMCTRL_PROGRAM		(1 << 10)
+#define ETMCTRL_PORTSEL		(1 << 11)
+#define ETMCTRL_DO_CONTEXTID	(3 << 14)
+#define ETMCTRL_PORTMASK1	(7 << 4)
+#define ETMCTRL_PORTMASK2	(1 << 21)
+#define ETMCTRL_PORTMASK	(ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
+#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)
+#define ETMCTRL_DO_CPRT		(1 << 1)
+#define ETMCTRL_DATAMASK	(3 << 2)
+#define ETMCTRL_DATA_DO_DATA	(1 << 2)
+#define ETMCTRL_DATA_DO_ADDR	(1 << 3)
+#define ETMCTRL_DATA_DO_BOTH	(ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
+#define ETMCTRL_BRANCH_OUTPUT	(1 << 8)
+#define ETMCTRL_CYCLEACCURATE	(1 << 12)
+
+/* ETM configuration code register */
+#define ETMR_CONFCODE		(0x04)
+
+/* ETM trace start/stop resource control register */
+#define ETMR_TRACESSCTRL	(0x18)
+
+/* ETM trigger event register */
+#define ETMR_TRIGEVT		(0x08)
+
+/* address access type register bits, "ETM architecture",
+ * table 3-27 */
+/* - access type */
+#define ETMAAT_IFETCH		0
+#define ETMAAT_IEXEC		1
+#define ETMAAT_IEXECPASS	2
+#define ETMAAT_IEXECFAIL	3
+#define ETMAAT_DLOADSTORE	4
+#define ETMAAT_DLOAD		5
+#define ETMAAT_DSTORE		6
+/* - comparison access size */
+#define ETMAAT_JAVA		(0 << 3)
+#define ETMAAT_THUMB		(1 << 3)
+#define ETMAAT_ARM		(3 << 3)
+/* - data value comparison control */
+#define ETMAAT_NOVALCMP		(0 << 5)
+#define ETMAAT_VALMATCH		(1 << 5)
+#define ETMAAT_VALNOMATCH	(3 << 5)
+/* - exact match */
+#define ETMAAT_EXACTMATCH	(1 << 7)
+/* - context id comparator control */
+#define ETMAAT_IGNCONTEXTID	(0 << 8)
+#define ETMAAT_VALUE1		(1 << 8)
+#define ETMAAT_VALUE2		(2 << 8)
+#define ETMAAT_VALUE3		(3 << 8)
+/* - security level control */
+#define ETMAAT_IGNSECURITY	(0 << 10)
+#define ETMAAT_NSONLY		(1 << 10)
+#define ETMAAT_SONLY		(2 << 10)
+
+#define ETMR_COMP_VAL(x)	(0x40 + (x) * 4)
+#define ETMR_COMP_ACC_TYPE(x)	(0x80 + (x) * 4)
+
+/* ETM status register, "ETM Architecture", 3.3.2 */
+#define ETMR_STATUS		(0x10)
+#define ETMST_OVERFLOW		(1 << 0)
+#define ETMST_PROGBIT		(1 << 1)
+#define ETMST_STARTSTOP		(1 << 2)
+#define ETMST_TRIGGER		(1 << 3)
+
+#define etm_progbit(t)		(etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
+#define etm_started(t)		(etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
+#define etm_triggered(t)	(etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER)
+
+#define ETMR_TRACEENCTRL2	0x1c
+#define ETMR_TRACEENCTRL	0x24
+#define ETMTE_INCLEXCL		(1 << 24)
+#define ETMR_TRACEENEVT		0x20
+#define ETMCTRL_OPTS		(ETMCTRL_DO_CPRT | \
+				ETMCTRL_DATA_DO_ADDR | \
+				ETMCTRL_BRANCH_OUTPUT | \
+				ETMCTRL_DO_CONTEXTID)
+
+/* ETB registers, "CoreSight Components TRM", 9.3 */
+#define ETBR_DEPTH		0x04
+#define ETBR_STATUS		0x0c
+#define ETBR_READMEM		0x10
+#define ETBR_READADDR		0x14
+#define ETBR_WRITEADDR		0x18
+#define ETBR_TRIGGERCOUNT	0x1c
+#define ETBR_CTRL		0x20
+#define ETBR_FORMATTERCTRL	0x304
+#define ETBFF_ENFTC		1
+#define ETBFF_ENFCONT		(1 << 1)
+#define ETBFF_FONFLIN		(1 << 4)
+#define ETBFF_MANUAL_FLUSH	(1 << 6)
+#define ETBFF_TRIGIN		(1 << 8)
+#define ETBFF_TRIGEVT		(1 << 9)
+#define ETBFF_TRIGFL		(1 << 10)
+
+#define etb_writel(t, v, x) \
+	(__raw_writel((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+
+#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t) \
+	do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+
+#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
+#define etb_unlock(t) \
+	do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+
+#endif /* __ASM_HARDWARE_CORESIGHT_H */
+
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 8d60ad267e3..5daea2961d4 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -234,7 +234,13 @@ extern int iop3xx_get_init_atu(void);
 void iop3xx_map_io(void);
 void iop_init_cp6_handler(void);
 void iop_init_time(unsigned long tickrate);
-unsigned long iop_gettimeoffset(void);
+
+static inline u32 read_tmr0(void)
+{
+	u32 val;
+	asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val));
+	return val;
+}
 
 static inline void write_tmr0(u32 val)
 {
@@ -253,6 +259,11 @@ static inline u32 read_tcr0(void)
 	return val;
 }
 
+static inline void write_tcr0(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val));
+}
+
 static inline u32 read_tcr1(void)
 {
 	u32 val;
@@ -260,6 +271,11 @@ static inline u32 read_tcr1(void)
 	return val;
 }
 
+static inline void write_tcr1(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c3, c1, 0" : : "r" (val));
+}
+
 static inline void write_trr0(u32 val)
 {
 	asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val));
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index cefedf06213..5421d82a257 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -125,8 +125,10 @@
  * private definitions which should NOT be used outside memory.h
  * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
  */
+#ifndef __virt_to_phys
 #define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
 
 /*
  * Convert a physical address to a Page Frame Number and back
@@ -134,6 +136,12 @@
 #define __phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
 #define __pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
 
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -194,7 +202,8 @@ static inline void *phys_to_virt(unsigned long x)
 #ifndef __virt_to_bus
 #define __virt_to_bus	__virt_to_phys
 #define __bus_to_virt	__phys_to_virt
-#define __pfn_to_bus(x)	((x) << PAGE_SHIFT)
+#define __pfn_to_bus(x)	__pfn_to_phys(x)
+#define __bus_to_pfn(x)	__phys_to_pfn(x)
 #endif
 
 static inline __deprecated unsigned long virt_to_bus(void *x)
@@ -293,11 +302,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #endif /* !CONFIG_DISCONTIGMEM */
 
 /*
- * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die.
- */
-#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
-
-/*
  * Optional coherency support. Currently used only by selected
  * Intel XSC3-based systems.
  */
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
index 8eebf89f5ab..41f99c573b9 100644
--- a/arch/arm/include/asm/mman.h
+++ b/arch/arm/include/asm/mman.h
@@ -1 +1,4 @@
 #include <asm-generic/mman.h>
+
+#define arch_mmap_check(addr, len, flags) \
+	(((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 201ccaa11f6..11397687f42 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -304,13 +304,23 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
 
 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
+#define __pgprot_modify(prot,mask,bits)	\
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
 #define pgprot_noncached(prot) \
-	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 #define pgprot_writecombine(prot) \
-	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+#if __LINUX_ARM_ARCH__ >= 7
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#else
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
+#endif
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h
index 92ac61d294f..90ffd04b8e7 100644
--- a/arch/arm/include/asm/socket.h
+++ b/arch/arm/include/asm/socket.h
@@ -60,4 +60,6 @@
 #define SO_PROTOCOL	38
 #define SO_DOMAIN	39
 
+#define SO_RXQ_OVFL	40
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index ca2bf2f6d6e..9997ad20eff 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -22,6 +22,24 @@
 # define __SWAB_64_THRU_32__
 #endif
 
+#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+	__asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
+	return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+	__asm__ ("rev %0, %1" : "=r" (x) : "r" (x));
+	return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#else
+
 static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
 	__u32 t;
@@ -48,3 +66,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 
 #endif
 
+#endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index d65b2f5bf41..058e7e90881 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -138,21 +138,26 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#ifndef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		dmb()
+#define rmb()		dmb()
+#define wmb()		dmb()
+#else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#endif
+
+#ifndef CONFIG_SMP
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define mb()		dmb()
-#define rmb()		dmb()
-#define wmb()		dmb()
-#define smp_mb()	dmb()
-#define smp_rmb()	dmb()
-#define smp_wmb()	dmb()
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
 #endif
+
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
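Illustrative notes follow; these sketches are not part of the patch.

Note on the coresight.h addition above: ETMCTRL_PORTSIZE(x) spreads the 4-bit trace port size code across ETM control register bits [6:4] and bit 21 (compare ETMCTRL_PORTMASK1/ETMCTRL_PORTMASK2). A small host-side sketch, with the macro copied from the new header, that prints the resulting bit patterns:

#include <stdio.h>

/* Copied from the new coresight.h: the low three bits of the port-size
 * code land in bits [6:4], the fourth bit lands in bit 21. */
#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)

int main(void)
{
	for (unsigned int code = 0; code < 16; code++)
		printf("port size code %2u -> ETMCTRL bits 0x%08x\n",
		       code, (unsigned int)ETMCTRL_PORTSIZE(code));
	return 0;
}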
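Note on the mman.h change above: arch_mmap_check() now rejects MAP_FIXED requests below FIRST_USER_ADDRESS. A standalone sketch of the check's logic; the MAP_FIXED and FIRST_USER_ADDRESS values are illustrative stand-ins (FIRST_USER_ADDRESS is typically PAGE_SIZE on ARM) rather than values taken from this patch:

#include <errno.h>
#include <stdio.h>

#define MAP_FIXED           0x10      /* illustrative stand-in */
#define FIRST_USER_ADDRESS  0x1000UL  /* assumed to be PAGE_SIZE */

/* The macro as added by the patch. */
#define arch_mmap_check(addr, len, flags) \
	(((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)

int main(void)
{
	printf("%d\n", arch_mmap_check(0x0UL, 4096, MAP_FIXED));    /* -EINVAL */
	printf("%d\n", arch_mmap_check(0x8000UL, 4096, MAP_FIXED)); /* 0 */
	printf("%d\n", arch_mmap_check(0x0UL, 4096, 0));            /* 0: addr is only a hint */
	return 0;
}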
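Note on the swab.h change above: on ARMv6 and later the byte swaps are now done with the rev16/rev instructions instead of the older shift-and-mask sequence. A minimal user-space sketch of the same semantics, using compiler builtins instead of the kernel's inline asm so it runs on any host:

#include <stdint.h>
#include <stdio.h>

/* Same result as the rev/rev16-based __arch_swab32/__arch_swab16 above,
 * expressed with GCC/clang builtins for portability. */
static uint32_t swab32(uint32_t x) { return __builtin_bswap32(x); }
static uint16_t swab16(uint16_t x) { return __builtin_bswap16(x); }

int main(void)
{
	printf("%08x -> %08x\n", 0x12345678u, swab32(0x12345678u)); /* 78563412 */
	printf("%04x -> %04x\n", 0x1234u, swab16(0x1234u));         /* 3412 */
	return 0;
}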